__init__.py
null
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """
input_drift.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ class input_drift(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.codeText = '' def addInputDriftClass(self): text = "\ \nclass inputdrift():\ \n\ \n def __init__(self,base_config):\ \n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\ \n self.currentDataLocation = base_config['currentDataLocation']\ \n home = Path.home()\ \n if platform.system() == 'Windows':\ \n from pathlib import WindowsPath\ \n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\ \n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\ \n else:\ \n from pathlib import PosixPath\ \n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\ \n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\ \n if not output_model_dir.exists():\ \n raise ValueError(f'Configuration file not found at {output_model_dir}')\ \n\ \n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\ \n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\ \n mlflow.set_tracking_uri(tracking_uri)\ \n mlflow.set_registry_uri(registry_uri)\ \n client = mlflow.tracking.MlflowClient(\ \n tracking_uri=tracking_uri,\ \n registry_uri=registry_uri,\ \n )\ \n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\ \n model = mlflow.pyfunc.load_model(model_version_uri)\ \n run = client.get_run(model.metadata.run_id)\ \n if run.info.artifact_uri.startswith('file:'):\ \n artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\ \n else:\ \n artifact_path = Path(run.info.artifact_uri)\ \n self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\ \n\ \n def get_input_drift(self,current_data, historical_data):\ \n curr_num_feat = current_data.select_dtypes(include='number')\ \n hist_num_feat = historical_data.select_dtypes(include='number')\ \n num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\ \n alert_count = 0\ \n data = {\ \n 'current':{'data':current_data},\ \n 'hist': {'data': historical_data}\ \n }\ \n dist_changed_columns = []\ \n dist_change_message = []\ \n for feature in num_features:\ \n curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\ \n if (curr_static_value < 0.05):\ \n distribution = {}\ \n distribution['hist'] = self.DistributionFinder( historical_data[feature])\ \n distribution['curr'] = self.DistributionFinder( current_data[feature])\ \n if(distribution['hist']['name'] == distribution['curr']['name']):\ \n pass\ \n else:\ \n alert_count = alert_count + 1\ \n dist_changed_columns.append(feature)\ \n changed_column = {}\ \n changed_column['Feature'] = feature\ \n changed_column['KS_Training'] = curr_static_value\ \n changed_column['Training_Distribution'] = distribution['hist']['name']\ \n changed_column['New_Distribution'] = distribution['curr']['name']\ \n dist_change_message.append(changed_column)\ \n if alert_count:\ \n resultStatus = dist_change_message\ 
\n else :\ \n resultStatus='Model is working as expected'\ \n return(alert_count, resultStatus)\ \n\ \n def DistributionFinder(self,data):\ \n best_distribution =''\ \n best_sse =0.0\ \n if(data.dtype in ['int','int64']):\ \n distributions= {'bernoulli':{'algo':st.bernoulli},\ \n 'binom':{'algo':st.binom},\ \n 'geom':{'algo':st.geom},\ \n 'nbinom':{'algo':st.nbinom},\ \n 'poisson':{'algo':st.poisson}\ \n }\ \n index, counts = np.unique(data.astype(int),return_counts=True)\ \n if(len(index)>=2):\ \n best_sse = np.inf\ \n y1=[]\ \n total=sum(counts)\ \n mean=float(sum(index*counts))/total\ \n variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\ \n dispersion=mean/float(variance)\ \n theta=1/float(dispersion)\ \n r=mean*(float(theta)/1-theta)\ \n\ \n for j in counts:\ \n y1.append(float(j)/total)\ \n distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\ \n distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\ \n distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\ \n distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\ \n distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\ \n\ \n sselist = []\ \n for dist in distributions.keys():\ \n distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\ \n if np.isnan(distributions[dist]['sess']):\ \n distributions[dist]['sess'] = float('inf')\ \n best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\ \n best_distribution = best_dist\ \n best_sse = distributions[best_dist]['sess']\ \n\ \n elif (len(index) == 1):\ \n best_distribution = 'Constant Data-No Distribution'\ \n best_sse = 0.0\ \n elif(data.dtype in ['float64','float32']):\ \n distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\ \n best_distribution = st.norm.name\ \n best_sse = np.inf\ \n nrange = data.max() - data.min()\ \n\ \n y, x = np.histogram(data.astype(float), bins='auto', density=True)\ \n x = (x + np.roll(x, -1))[:-1] / 2.0\ \n\ \n for distribution in distributions:\ \n with warnings.catch_warnings():\ \n warnings.filterwarnings('ignore')\ \n params = distribution.fit(data.astype(float))\ \n arg = params[:-2]\ \n loc = params[-2]\ \n scale = params[-1]\ \n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\ \n sse = np.sum(np.power(y - pdf, 2.0))\ \n if( sse < best_sse):\ \n best_distribution = distribution.name\ \n best_sse = sse\ \n\ \n return {'name':best_distribution, 'sse': best_sse}\ \n\ " return text def addSuffixCode(self, indent=1): text ="\n\ \ndef check_drift( config):\ \n inputdriftObj = inputdrift(config)\ \n historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\ \n currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\ \n dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\ \n if message == 'Model is working as expected':\ \n output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\ \n else:\ \n output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\ \n return(output_json)\ \n\ \nif __name__ == '__main__':\ \n try:\ \n if len(sys.argv) < 2:\ \n raise ValueError('config file not present')\ \n config = sys.argv[1]\ \n if Path(config).is_file() and Path(config).suffix == '.json':\ \n with open(config, 'r') as f:\ \n config = json.load(f)\ \n else:\ \n config = 
json.loads(config)\ \n output = check_drift(config)\ \n status = {'Status':'Success','Message':output}\ \n print('input_drift:'+json.dumps(status))\ \n except Exception as e:\ \n status = {'Status':'Failure','Message':str(e)}\ \n print('input_drift:'+json.dumps(status))" return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def generateCode(self): self.codeText += self.addInputDriftClass() self.codeText += self.addSuffixCode() def getCode(self): return self.codeText
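The drift criterion inside the generated get_input_drift() is a two-sample Kolmogorov-Smirnov test applied per numeric feature, with a p-value below 0.05 treated as a distribution change. A minimal standalone sketch of that check follows; the synthetic data is illustrative only and is not part of the generator:

import numpy as np
import scipy.stats as st

rng = np.random.default_rng(0)
historical = rng.normal(loc=0.0, scale=1.0, size=500)   # stands in for the training data
current = rng.normal(loc=0.8, scale=1.0, size=500)       # stands in for shifted production data

# Same test and threshold as the generated code: flag the feature when p < 0.05.
pvalue = st.ks_2samp(historical, current).pvalue
if pvalue < 0.05:
    print(f'distribution changed (p={pvalue:.4f})')
else:
    print(f'no significant change (p={pvalue:.4f})')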
output_drift.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ class output_drift(): def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4): self.tab = ' ' * tab_size self.codeText = '' self.missing = missing self.word2num_features = word2num_features self.cat_encoder = cat_encoder self.target_encoder = target_encoder self.normalizer = normalizer self.text_profiler = text_profiler self.feature_reducer = feature_reducer self.score_smaller_is_better = score_smaller_is_better self.problem_type = problem_type def addDatabaseClass(self, indent=0): text = "\ \nclass database():\ \n def __init__(self, config):\ \n self.host = config['host']\ \n self.port = config['port']\ \n self.user = config['user']\ \n self.password = config['password']\ \n self.database = config['database']\ \n self.measurement = config['measurement']\ \n self.tags = config['tags']\ \n self.client = self.get_client()\ \n\ \n def read_data(self, query)->pd.DataFrame:\ \n cursor = self.client.query(query)\ \n points = cursor.get_points()\ \n my_list=list(points)\ \n df=pd.DataFrame(my_list)\ \n return df\ \n\ \n def get_client(self):\ \n client = InfluxDBClient(self.host,self.port,self.user,self.password)\ \n databases = client.get_list_database()\ \n databases = [x['name'] for x in databases]\ \n if self.database not in databases:\ \n client.create_database(self.database)\ \n return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\ \n\ \n def write_data(self,data):\ \n if isinstance(data, pd.DataFrame):\ \n sorted_col = data.columns.tolist()\ \n sorted_col.sort()\ \n data = data[sorted_col]\ \n data = data.to_dict(orient='records')\ \n for row in data:\ \n if 'time' in row.keys():\ \n p = '%Y-%m-%dT%H:%M:%S.%fZ'\ \n time_str = datetime.strptime(row['time'], p)\ \n del row['time']\ \n else:\ \n time_str = None\ \n if 'model_ver' in row.keys():\ \n self.tags['model_ver']= row['model_ver']\ \n del row['model_ver']\ \n json_body = [{\ \n 'measurement': self.measurement,\ \n 'time': time_str,\ \n 'tags': self.tags,\ \n 'fields': row\ \n }]\ \n self.client.write_points(json_body)\ \n\ \n def close(self):\ \n self.client.close()\ \n" if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def addPredictClass(self, indent=0): text = "\ \nclass predict():\ \n\ \n def __init__(self, base_config):\ \n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\ \n self.dataLocation = base_config['dataLocation']\ \n self.db_enabled = base_config.get('db_enabled', False)\ \n if self.db_enabled:\ \n self.db_config = base_config['db_config']\ \n home = Path.home()\ \n if platform.system() == 'Windows':\ \n from pathlib import WindowsPath\ \n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\ \n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\ \n else:\ \n from pathlib import 
PosixPath\ \n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\ \n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\ \n if not output_model_dir.exists():\ \n raise ValueError(f'Configuration file not found at {output_model_dir}')\ \n\ \n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\ \n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\ \n mlflow.set_tracking_uri(tracking_uri)\ \n mlflow.set_registry_uri(registry_uri)\ \n client = mlflow.tracking.MlflowClient(\ \n tracking_uri=tracking_uri,\ \n registry_uri=registry_uri,\ \n )\ \n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\ \n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\ \n self.model = mlflow.pyfunc.load_model(model_version_uri)\ \n run = client.get_run(self.model.metadata.run_id)\ \n if run.info.artifact_uri.startswith('file:'): #remove file:///\ \n self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\ \n else:\ \n self.artifact_path = Path(run.info.artifact_uri)\ \n with open(self.artifact_path/'deploy.json', 'r') as f:\ \n deployment_dict = json.load(f)\ \n with open(self.artifact_path/'features.txt', 'r') as f:\ \n self.train_features = f.readline().rstrip().split(',')\ \n\ \n self.dataLocation = base_config['dataLocation']\ \n self.selected_features = deployment_dict['load_data']['selected_features']\ \n self.target_feature = deployment_dict['load_data']['target_feature']\ \n self.output_model_dir = output_model_dir" if self.missing: text += "\n self.missing_values = deployment_dict['transformation']['fillna']" if self.word2num_features: text += "\n self.word2num_features = deployment_dict['transformation']['word2num_features']" if self.cat_encoder == 'labelencoding': text += "\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']" elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'): text += "\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']" text += "\n self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']" if self.target_encoder: text += "\n self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])" if self.normalizer: text += "\n self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\ \n self.normalizer_col = deployment_dict['transformation']['normalizer']['features']" if self.text_profiler: text += "\n self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\ \n self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']" if self.feature_reducer: text += "\n self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\ \n self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']" text += """ def read_data_from_db(self): if self.db_enabled: try: db = database(self.db_config) query = "SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''".format(db.measurement, self.model_version, self.target_feature) if 'read_time' in self.db_config.keys() and self.db_config['read_time']: query += f" time > now() - {self.db_config['read_time']}" data = db.read_data(query) except: raise ValueError('Unable to read from the database') finally: if db: db.close() return data 
return None""" text += "\ \n def predict(self, data):\ \n df = pd.DataFrame()\ \n if Path(data).exists():\ \n if Path(data).suffix == '.tsv':\ \n df=read_data(data,encoding='utf-8',sep='\t')\ \n elif Path(data).suffix == '.csv':\ \n df=read_data(data,encoding='utf-8')\ \n else:\ \n if Path(data).suffix == '.json':\ \n jsonData = read_json(data)\ \n df = pd.json_normalize(jsonData)\ \n elif is_file_name_url(data):\ \n df = read_data(data,encoding='utf-8')\ \n else:\ \n jsonData = json.loads(data)\ \n df = pd.json_normalize(jsonData)\ \n if len(df) == 0:\ \n raise ValueError('No data record found')\ \n missing_features = [x for x in self.selected_features if x not in df.columns]\ \n if missing_features:\ \n raise ValueError(f'some feature/s is/are missing: {missing_features}')\ \n if self.target_feature not in df.columns:\ \n raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\ \n df_copy = df.copy()\ \n df = df[self.selected_features]" if self.word2num_features: text += "\n for feat in self.word2num_features:" text += "\n df[ feat ] = df[feat].apply(lambda x: s2n(x))" if self.missing: text += "\n df.fillna(self.missing_values, inplace=True)" if self.cat_encoder == 'labelencoding': text += "\n df.replace(self.cat_encoder, inplace=True)" elif self.cat_encoder == 'targetencoding': text += "\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)" text += "\n df = cat_enc.transform(df)" elif self.cat_encoder == 'onehotencoding': text += "\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)" text += "\n transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()" text += "\n df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]" if self.normalizer: text += "\n df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])" if self.text_profiler: text += "\n text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\ \n df_vect=self.text_profiler.transform(text_corpus)\ \n if isinstance(df_vect, np.ndarray):\ \n df1 = pd.DataFrame(df_vect)\ \n else:\ \n df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\ \n df1 = df1.add_suffix('_vect')\ \n df = pd.concat([df, df1],axis=1)" if self.feature_reducer: text += "\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])" else: text += "\n df = df[self.train_features]" if self.target_encoder: text += "\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\ \n df_copy['prediction'] = output.idxmax(axis=1)" else: text += "\n output = self.model.predict(df).reshape(1, -1)[0].round(2)\ \n df_copy['prediction'] = output" text += "\n return df_copy" if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def getClassificationMatrixCode(self, indent=0): text = "\ \ndef get_classification_metrices(actual_values, predicted_values):\ \n result = {}\ \n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\ \n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\ \n average='macro')\ \n\ \n result['accuracy'] = accuracy_score\ \n result['precision'] = avg_precision\ \n 
result['recall'] = avg_recall\ \n result['f1'] = avg_f1\ \n return result\ \n\ " if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def getRegrssionMatrixCode(self, indent=0): text = "\ \ndef get_regression_metrices( actual_values, predicted_values):\ \n result = {}\ \n\ \n me = np.mean(predicted_values - actual_values)\ \n sde = np.std(predicted_values - actual_values, ddof = 1)\ \n\ \n abs_err = np.abs(predicted_values - actual_values)\ \n mae = np.mean(abs_err)\ \n sdae = np.std(abs_err, ddof = 1)\ \n\ \n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\ \n mape = np.mean(abs_perc_err)\ \n sdape = np.std(abs_perc_err, ddof = 1)\ \n\ \n result['mean_error'] = me\ \n result['mean_abs_error'] = mae\ \n result['mean_abs_perc_error'] = mape\ \n result['error_std'] = sde\ \n result['abs_error_std'] = sdae\ \n result['abs_perc_error_std'] = sdape\ \n return result\ \n\ " if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def addSuffixCode(self, indent=1): text ="\n\ \ndef check_drift( config):\ \n prediction = predict(config)\ \n usecase = config['modelName'] + '_' + config['modelVersion']\ \n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\ \n if not train_data_path.exists():\ \n raise ValueError(f'Training data not found at {train_data_path}')\ \n curr_with_pred = prediction.read_data_from_db()\ \n if prediction.target_feature not in curr_with_pred.columns:\ \n raise ValueError('Ground truth not updated for corresponding data in database')\ \n train_with_pred = prediction.predict(train_data_path)\ \n performance = {}" if self.problem_type == 'classification': text += "\n\ \n performance['train'] = get_classification_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\ \n performance['current'] = get_classification_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])" else: text += "\n\ \n performance['train'] = get_regression_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\ \n performance['current'] = get_regression_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])" text += "\n return performance" text += "\n\ \nif __name__ == '__main__':\ \n try:\ \n if len(sys.argv) < 2:\ \n raise ValueError('config file not present')\ \n config = sys.argv[1]\ \n if Path(config).is_file() and Path(config).suffix == '.json':\ \n with open(config, 'r') as f:\ \n config = json.load(f)\ \n else:\ \n config = json.loads(config)\ \n output = check_drift(config)\ \n status = {'Status':'Success','Message':json.loads(output)}\ \n print('output_drift:'+json.dumps(status))\ \n except Exception as e:\ \n status = {'Status':'Failure','Message':str(e)}\ \n print('output_drift:'+json.dumps(status))" if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def generateCode(self): self.codeText += self.addDatabaseClass() self.codeText += self.addPredictClass() if self.problem_type == 'classification': self.codeText += self.getClassificationMatrixCode() elif self.problem_type == 'regression': self.codeText += self.getRegrssionMatrixCode() else: raise ValueError(f"Unsupported problem type: {self.problem_type}") self.codeText += self.addSuffixCode() def getCode(self): return self.codeText
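The generated check_drift() compares training-time and production performance through get_classification_metrices() (or the regression variant), which wraps sklearn's macro-averaged scores. A standalone sketch of that computation, with illustrative labels in place of the data read back from the database:

import sklearn.metrics

actual = [0, 1, 1, 0, 1, 0]        # ground-truth labels (illustrative)
predicted = [0, 1, 0, 0, 1, 1]     # model predictions (illustrative)

# Mirrors the metrics dictionary built by the generated get_classification_metrices().
result = {
    'accuracy': sklearn.metrics.accuracy_score(actual, predicted),
    'precision': sklearn.metrics.precision_score(actual, predicted, average='macro'),
    'recall': sklearn.metrics.recall_score(actual, predicted, average='macro'),
    'f1': sklearn.metrics.f1_score(actual, predicted, average='macro'),
}
print(result)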
deploy.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class deploy(): def __init__(self, target_encoder=False, feature_reducer=False, score_smaller_is_better=True, tab_size=4): self.tab = ' ' * tab_size self.codeText = "\n\n\ \nclass deploy():\ \n\ \n def __init__(self, base_config, log=None):\ \n self.targetPath = (Path('aion')/base_config['targetPath']).resolve()\ \n if log:\ \n self.logger = log\ \n else:\ \n log_file = self.targetPath/IOFiles['log']\ \n self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\ \n try:\ \n self.initialize(base_config)\ \n except Exception as e:\ \n self.logger.error(e, exc_info=True)\ \n\ \n def initialize(self, base_config):\ \n self.usecase = base_config['targetPath']\ \n monitoring_data = read_json(self.targetPath/IOFiles['monitor'])\ \n self.prod_db_type = monitoring_data['prod_db_type']\ \n self.db_config = monitoring_data['db_config']\ \n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\ \n tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(monitoring_data.get('mlflow_config',mlflow_default_config), self.targetPath)\ \n mlflow.tracking.set_tracking_uri(tracking_uri)\ \n mlflow.tracking.set_registry_uri(registry_uri)\ \n client = mlflow.tracking.MlflowClient()\ \n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )\ \n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\ \n self.model = mlflow.pyfunc.load_model(model_version_uri)\ \n run = client.get_run(self.model.metadata.run_id)\ \n if run.info.artifact_uri.startswith('file:'): #remove file:///\ \n skip_name = 'file:'\ \n if run.info.artifact_uri.startswith('file:///'):\ \n skip_name = 'file:///'\ \n self.artifact_path = Path(run.info.artifact_uri[len(skip_name) : ])\ \n self.artifact_path_type = 'file'\ \n meta_data = read_json(self.artifact_path/IOFiles['metaData'])\ \n else:\ \n self.artifact_path = run.info.artifact_uri\ \n self.artifact_path_type = 'url'\ \n meta_data_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+IOFiles['metaData'])\ \n meta_data = read_json(meta_data_file)\ \n self.selected_features = meta_data['load_data']['selected_features']\ \n self.train_features = meta_data['training']['features']" if target_encoder: self.codeText += "\ \n if self.artifact_path_type == 'url':\ \n preprocessor_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\ \n target_encoder_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['target_encoder'])\ \n else:\ \n preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']\ \n target_encoder_file = self.artifact_path/meta_data['transformation']['target_encoder']\ \n self.target_encoder = joblib.load(target_encoder_file)" else: self.codeText += "\ \n if self.artifact_path_type == 'url':\ \n preprocessor_file = 
mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['transformation']['preprocessor'])\ \n else:\ \n preprocessor_file = self.artifact_path/meta_data['transformation']['preprocessor']" self.codeText += "\ \n self.preprocessor = joblib.load(preprocessor_file)\ \n self.preprocess_out_columns = meta_data['transformation']['preprocess_out_columns']\ " if feature_reducer: self.codeText += "\ \n if self.artifact_path_type == 'url':\ \n feature_reducer_file = mlflow.artifacts.download_artifacts(self.artifact_path+'/'+meta_data['featureengineering']['feature_reducer']['file'])\ \n else:\ \n feature_reducer_file = self.artifact_path/meta_data['featureengineering']['feature_reducer']['file']\ \n self.feature_reducer = joblib.load(feature_reducer_file)\ \n self.feature_reducer_cols = meta_data['featureengineering']['feature_reducer']['features']" self.codeText +="\n\ \n def write_to_db(self, data):\ \n prod_file = IOFiles['prodData']\ \n writer = dataReader(reader_type=self.prod_db_type,target_path=self.targetPath, config=self.db_config )\ \n writer.write(data, prod_file)\ \n writer.close()\ \n\ \n def predict(self, data=None):\ \n try:\ \n return self.__predict(data)\ \n except Exception as e:\ \n if self.logger:\ \n self.logger.error(e, exc_info=True)\ \n raise ValueError(json.dumps({'Status':'Failure', 'Message': str(e)}))\ \n\ \n def __predict(self, data=None):\ \n df = pd.DataFrame()\ \n jsonData = json.loads(data)\ \n df = pd.json_normalize(jsonData)\ \n if len(df) == 0:\ \n raise ValueError('No data record found')\ \n missing_features = [x for x in self.selected_features if x not in df.columns]\ \n if missing_features:\ \n raise ValueError(f'some feature/s is/are missing: {missing_features}')\ \n df_copy = df.copy()\ \n df = df[self.selected_features]\ \n df = self.preprocessor.transform(df)\ \n if isinstance(df, scipy.sparse.spmatrix):\ \n df = df.toarray()\ \n df = pd.DataFrame(df, columns=self.preprocess_out_columns)" if feature_reducer: self.codeText += "\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])" else: self.codeText += "\n df = df[self.train_features]" if target_encoder: self.codeText += "\n df = df.astype(np.float32)\ \n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\ \n df_copy['prediction'] = output.idxmax(axis=1)\ \n self.write_to_db(df_copy)\ \n df_copy['probability'] = output.max(axis=1).round(2)\ \n df_copy['remarks'] = output.apply(lambda x: x.to_json(), axis=1)\ \n output = df_copy.to_json(orient='records')" else: self.codeText += "\n output = self.model._model_impl.predict(df).reshape(1, -1)[0].round(2)\ \n df_copy['prediction'] = output\ \n self.write_to_db(df_copy)\ \n output = df_copy.to_json(orient='records')" self.codeText += "\n return output" self.input_files = {} self.output_files = {} self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json', 'performance' : 'performance.json','monitor':'monitoring.json','log':'predict.log','prodData':'prodData'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def 
getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() text += '\n' text += self.getOutputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def addStatement(self, statement, indent=1): pass def getCode(self): return self.codeText def getGroundtruthCode(self): return """ import sys import math import json import sqlite3 import pandas as pd from datetime import datetime from pathlib import Path import platform from utility import * from data_reader import dataReader IOFiles = { "monitoring":"monitoring.json", "prodDataGT":"prodDataGT" } class groundtruth(): def __init__(self, base_config): self.targetPath = Path('aion')/base_config['targetPath'] data = read_json(self.targetPath/IOFiles['monitoring']) self.prod_db_type = data['prod_db_type'] self.db_config = data['db_config'] def actual(self, data=None): df = pd.DataFrame() jsonData = json.loads(data) df = pd.json_normalize(jsonData) if len(df) == 0: raise ValueError('No data record found') self.write_to_db(df) status = {'Status':'Success','Message':'uploaded'} return json.dumps(status) def write_to_db(self, data): prod_file = IOFiles['prodDataGT'] writer = dataReader(reader_type=self.prod_db_type, target_path=self.targetPath, config=self.db_config ) writer.write(data, prod_file) writer.close() """ def getServiceCode(self): return """ from http.server import BaseHTTPRequestHandler,HTTPServer from socketserver import ThreadingMixIn import os from os.path import expanduser import platform import threading import subprocess import argparse import re import cgi import json import shutil import logging import sys import time import seaborn as sns from pathlib import Path from predict import deploy from groundtruth import groundtruth import pandas as pd import scipy.stats as st import numpy as np import warnings from utility import * from data_reader import dataReader warnings.filterwarnings("ignore") config_input = None IOFiles = { "inputData": "rawData.dat", "metaData": "modelMetaData.json", "production": "production.json", "log": "aion.log", "monitoring":"monitoring.json", "prodData": "prodData", "prodDataGT":"prodDataGT" } def DistributionFinder(data): try: distributionName = "" sse = 0.0 KStestStatic = 0.0 dataType = "" if (data.dtype == "float64" or data.dtype == "float32"): dataType = "Continuous" elif (data.dtype == "int"): dataType = "Discrete" elif (data.dtype == "int64"): dataType = "Discrete" if (dataType == "Discrete"): distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] index, counts = np.unique(data.astype(int), return_counts=True) if (len(index) >= 2): best_sse = np.inf y1 = [] total = sum(counts) mean = float(sum(index * counts)) / total variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) dispersion = mean / float(variance) theta = 1 / float(dispersion) r = mean * (float(theta) / 1 - theta) for j in counts: y1.append(float(j) / total) pmf1 = st.bernoulli.pmf(index, mean) pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) pmf4 = st.nbinom.pmf(index, mean, r) pmf5 = st.poisson.pmf(index, mean) sse1 = np.sum(np.power(y1 - pmf1, 2.0)) sse2 = np.sum(np.power(y1 - pmf2, 2.0)) sse3 = np.sum(np.power(y1 - pmf3, 2.0)) sse4 = np.sum(np.power(y1 - pmf4, 2.0)) sse5 = np.sum(np.power(y1 - pmf5, 2.0)) sselist = [sse1, sse2, sse3, sse4, 
sse5] best_distribution = 'NA' for i in range(0, len(sselist)): if best_sse > sselist[i] > 0: best_distribution = distributions[i].name best_sse = sselist[i] elif (len(index) == 1): best_distribution = "Constant Data-No Distribution" best_sse = 0.0 distributionName = best_distribution sse = best_sse elif (dataType == "Continuous"): distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, st.gamma, st.beta] best_distribution = st.norm.name best_sse = np.inf datamin = data.min() datamax = data.max() nrange = datamax - datamin y, x = np.histogram(data.astype(float), bins='auto', density=True) x = (x + np.roll(x, -1))[:-1] / 2.0 for distribution in distributions: params = distribution.fit(data.astype(float)) arg = params[:-2] loc = params[-2] scale = params[-1] pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) if (best_sse > sse > 0): best_distribution = distribution.name best_sse = sse distributionName = best_distribution sse = best_sse except: response = str(sys.exc_info()[0]) message = 'Job has Failed' + response exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) print(message) return distributionName, sse def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()): import matplotlib.pyplot as plt import math import io, base64, urllib np.seterr(divide='ignore', invalid='ignore') try: plt.clf() except: pass plt.rcParams.update({'figure.max_open_warning': 0}) sns.set(color_codes=True) pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] if len(feature) > 4: numneroffeatures = len(feature) plt.figure(figsize=(10, numneroffeatures*2)) else: plt.figure(figsize=(10,5)) for i in enumerate(feature): dataType = dataframe[i[1]].dtypes if dataType not in pandasNumericDtypes: dataframe[i[1]] = pd.Categorical(dataframe[i[1]]) dataframe[i[1]] = dataframe[i[1]].cat.codes dataframe[i[1]] = dataframe[i[1]].astype(int) dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0]) else: dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean()) plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1) plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1) distname, sse = DistributionFinder(dataframe[i[1]]) print(distname) ax = sns.distplot(dataframe[i[1]], label=distname) ax.legend(loc='best') if newdataframe.empty == False: dataType = newdataframe[i[1]].dtypes if dataType not in pandasNumericDtypes: newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]]) newdataframe[i[1]] = newdataframe[i[1]].cat.codes newdataframe[i[1]] = newdataframe[i[1]].astype(int) newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0]) else: newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean()) distname, sse = DistributionFinder(newdataframe[i[1]]) print(distname) ax = sns.distplot(newdataframe[i[1]],label=distname) ax.legend(loc='best') buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) string = base64.b64encode(buf.read()) uri = urllib.parse.quote(string) return uri def read_json(file_path): data = None with open(file_path,'r') as f: data = json.load(f) return data class HTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): print('PYTHON ######## REQUEST ####### STARTED') if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): ctype, pdict = 
cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) data = self.rfile.read(length) usecase = self.path.split('/')[-2] if usecase.lower() == config_input['targetPath'].lower(): operation = self.path.split('/')[-1] data = json.loads(data) dataStr = json.dumps(data) if operation.lower() == 'predict': output=deployobj.predict(dataStr) resp = output elif operation.lower() == 'groundtruth': gtObj = groundtruth(config_input) output = gtObj.actual(dataStr) resp = output elif operation.lower() == 'delete': targetPath = Path('aion')/config_input['targetPath'] for file in data: x = targetPath/file if x.exists(): os.remove(x) resp = json.dumps({'Status':'Success'}) else: outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'}) resp = outputStr else: outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'}) resp = outputStr else: outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'}) resp = outputStr resp=resp+'\\n' resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print('python ==> else1') self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() print('PYTHON ######## REQUEST ####### ENDED') return def do_GET(self): print('PYTHON ######## REQUEST ####### STARTED') if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): usecase = self.path.split('/')[-2] self.send_response(200) self.targetPath = Path('aion')/config_input['targetPath'] meta_data_file = self.targetPath/IOFiles['metaData'] if meta_data_file.exists(): meta_data = read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {meta_data_file}') production_file = self.targetPath/IOFiles['production'] if production_file.exists(): production_data = read_json(production_file) else: raise ValueError(f'Production Details not found: {production_file}') operation = self.path.split('/')[-1] if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'): self.send_header('Content-Type', 'text/html') self.end_headers() ModelString = production_data['Model'] ModelPerformance = ModelString+'_performance.json' performance_file = self.targetPath/ModelPerformance if performance_file.exists(): performance_data = read_json(performance_file) else: raise ValueError(f'Production Details not found: {performance_data}') Scoring_Creteria = performance_data['scoring_criteria'] train_score = round(performance_data['metrices']['train_score'],2) test_score = round(performance_data['metrices']['test_score'],2) current_score = 'NA' monitoring = read_json(self.targetPath/IOFiles['monitoring']) reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config']) inputDatafile = self.targetPath/IOFiles['inputData'] NoOfPrediction = 0 NoOfGroundTruth = 0 inputdistribution = '' if reader.file_exists(IOFiles['prodData']): dfPredict = reader.read(IOFiles['prodData']) dfinput = pd.read_csv(inputDatafile) features = meta_data['training']['features'] inputdistribution = getDriftDistribution(features,dfinput,dfPredict) NoOfPrediction = len(dfPredict) if reader.file_exists(IOFiles['prodDataGT']): dfGroundTruth = reader.read(IOFiles['prodDataGT']) NoOfGroundTruth = len(dfGroundTruth) common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()] proddataDF = 
pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner') if Scoring_Creteria.lower() == 'accuracy': from sklearn.metrics import accuracy_score current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction']) current_score = round((current_score*100),2) elif Scoring_Creteria.lower() == 'recall': from sklearn.metrics import accuracy_score current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro') current_score = round((current_score*100),2) msg = \"""<html> <head> <title>Performance Details</title> </head> <style> table, th, td {border} </style> <body> <h2><b>Deployed Model:</b>{ModelString}</h2> <br/> <table style="width:50%"> <tr> <td>No of Prediction</td> <td>{NoOfPrediction}</td> </tr> <tr> <td>No of GroundTruth</td> <td>{NoOfGroundTruth}</td> </tr> </table> <br/> <table style="width:100%"> <tr> <th>Score Type</th> <th>Train Score</th> <th>Test Score</th> <th>Production Score</th> </tr> <tr> <td>{Scoring_Creteria}</td> <td>{train_score}</td> <td>{test_score}</td> <td>{current_score}</td> </tr> </table> <br/> <br/> <img src="data:image/png;base64,{newDataDrift}" alt="" > </body> </html> \""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution) elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'): self.send_header('Content-Type', 'text/plain') self.end_headers() log_file = self.targetPath/IOFiles['log'] if log_file.exists(): with open(log_file) as f: msg = f.read() f.close() else: raise ValueError(f'Log Details not found: {log_file}') else: self.send_header('Content-Type', 'application/json') self.end_headers() features = meta_data['load_data']['selected_features'] bodydes='[' for x in features: if bodydes != '[': bodydes = bodydes+',' bodydes = bodydes+'{"'+x+'":"value"}' bodydes+=']' urltext = '/AION/'+config_input['targetPath']+'/predict' urltextgth='/AION/'+config_input['targetPath']+'/groundtruth' urltextproduction='/AION/'+config_input['targetPath']+'/metrices' msg=\""" Version:{modelversion} RunNo: {runNo} URL for Prediction ================== URL:{url} RequestType: POST Content-Type=application/json Body: {displaymsg} Output: prediction,probability(if Applicable),remarks corresponding to each row. URL for GroundTruth =================== URL:{urltextgth} RequestType: POST Content-Type=application/json Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. 
Otherwise outputdrift will not work URL for Model In Production Analysis ==================================== URL:{urltextproduction} RequestType: GET Content-Type=application/json \""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes) self.wfile.write(msg.encode()) else: self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() return class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): allow_reuse_address = True def shutdown(self): self.socket.close() HTTPServer.shutdown(self) class file_status(): def __init__(self, reload_function, params, file, logger): self.files_status = {} self.initializeFileStatus(file) self.reload_function = reload_function self.params = params self.logger = logger def initializeFileStatus(self, file): self.files_status = {'path': file, 'time':file.stat().st_mtime} def is_file_changed(self): if self.files_status['path'].stat().st_mtime > self.files_status['time']: self.files_status['time'] = self.files_status['path'].stat().st_mtime return True return False def run(self): global config_input while( True): time.sleep(30) if self.is_file_changed(): production_details = targetPath/IOFiles['production'] if not production_details.exists(): raise ValueError(f'Model in production details does not exist') productionmodel = read_json(production_details) config_file = Path(__file__).parent/'config.json' if not Path(config_file).exists(): raise ValueError(f'Config file is missing: {config_file}') config_input = read_json(config_file) config_input['deployedModel'] = productionmodel['Model'] config_input['deployedRunNo'] = productionmodel['runNo'] self.logger.info('Model changed Reloading.....') self.logger.info(f'Model: {config_input["deployedModel"]}') self.logger.info(f'Version: {str(config_input["modelVersion"])}') self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}') self.reload_function(config_input) class SimpleHttpServer(): def __init__(self, ip, port, model_file_path,reload_function,params, logger): self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) self.status_checker = file_status( reload_function, params, model_file_path, logger) def start(self): self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() self.status_thread = threading.Thread(target=self.status_checker.run) self.status_thread.start() def waitForThread(self): self.server_thread.join() self.status_thread.join() def stop(self): self.server.shutdown() self.waitForThread() if __name__=='__main__': parser = argparse.ArgumentParser(description='HTTP Server') parser.add_argument('-ip','--ipAddress', help='HTTP Server IP') parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server') args = parser.parse_args() config_file = Path(__file__).parent/'config.json' if not Path(config_file).exists(): raise ValueError(f'Config file is missing: {config_file}') config = read_json(config_file) if args.ipAddress: config['ipAddress'] = args.ipAddress if args.portNo: config['portNo'] = args.portNo targetPath = Path('aion')/config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') production_details = targetPath/IOFiles['production'] if not production_details.exists(): raise ValueError(f'Model in production details does not exist') productionmodel = read_json(production_details) config['deployedModel'] = 
productionmodel['Model'] config['deployedRunNo'] = productionmodel['runNo'] #server = SimpleHttpServer(config['ipAddress'],int(config['portNo'])) config_input = config logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S') logger = logging.getLogger(Path(__file__).parent.name) deployobj = deploy(config_input, logger) server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger) logger.info('HTTP Server Running...........') logger.info(f"IP Address: {config['ipAddress']}") logger.info(f"Port No.: {config['portNo']}") print('HTTP Server Running...........') print('For Prediction') print('================') print('Request Type: Post') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/predict') print('\\nFor GroundTruth') print('================') print('Request Type: Post') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/groundtruth') print('\\nFor Help') print('================') print('Request Type: Get') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/help') print('\\nFor Model In Production Analysis') print('================') print('Request Type: Get') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/metrices') server.start() server.waitForThread() """
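A hedged sketch of how the deploy generator's pieces could be assembled into deployment scripts. The names predict.py and groundtruth.py follow the imports inside the generated service code ('from predict import deploy', 'from groundtruth import groundtruth'); the service file name and the assumption that the surrounding pipeline prepends the remaining imports (mlflow, joblib, pandas, utility, data_reader) are illustrative, not taken from this file:

from pathlib import Path
from deploy import deploy   # assumes the module is importable by its file name

gen = deploy(target_encoder=True, feature_reducer=False)
# IOFiles/output_file dictionaries first, then the generated deploy class body.
predict_source = gen.getInputOutputFiles() + gen.getCode()
Path('predict.py').write_text(predict_source)
Path('groundtruth.py').write_text(gen.getGroundtruthCode())
Path('aion_service.py').write_text(gen.getServiceCode())   # service file name is an assumption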
trainer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class learner(): def __init__(self, problem_type="classification", target_feature="", sample_method=None,indent=0, tab_size=4): self.tab = " "*tab_size self.df_name = 'df' self.problem_type = problem_type self.target_feature = target_feature self.search_space = [] self.codeText = f"\ndef train(log):" self.input_files = {} self.output_files = {} self.function_code = '' self.addInputFiles({'inputData' : 'featureEngineeredData.dat','testData' : 'test.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json','log' : 'aion.log'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n return config" return text def __addSaveModelCode(self): text = "\n\ \ndef save_model( experiment_id, estimator, features, metrices, params,tags, scoring):\ \n # mlflow log model, metrices and parameters\ \n with mlflow.start_run(experiment_id = experiment_id, run_name = model_name):\ \n return logMlflow(params, metrices, estimator, tags, model_name.split('_')[0])" return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): return self.function_code + '\n' + self.codeText def addLocalFunctionsCode(self): self.function_code += self.__addValidateConfigCode() self.function_code += self.__addSaveModelCode() def getPrefixModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas', 'mod_as':'pd'} ] return modules def addPrefixCode(self, indent=1): self.codeText += "\ \n config = validateConfig()\ \n targetPath = Path('aion')/config['targetPath']\ \n if not targetPath.exists():\ \n raise ValueError(f'targetPath does not exist')\ \n meta_data_file = targetPath/IOFiles['metaData']\ \n if meta_data_file.exists():\ \n meta_data = read_json(meta_data_file)\ \n else:\ \n raise ValueError(f'Configuration file not found: {meta_data_file}')\ \n log_file = targetPath/IOFiles['log']\ \n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\ \n dataLoc = 
targetPath/IOFiles['inputData']\ \n if not dataLoc.exists():\ \n return {'Status':'Failure','Message':'Data location does not exists.'}\ \n\ \n status = dict()\ \n usecase = config['targetPath']\ \n df = pd.read_csv(dataLoc)\ \n prev_step_output = meta_data['featureengineering']['Status']" def getSuffixModules(self): modules = [{'module':'platform'} ,{'module':'time'} ,{'module':'mlflow'} ] return modules def add_100_trainsize_code(self): self.codeText +="\n\ \n else:\ \n test_score = train_score\ \n metrices = {}" def addSuffixCode(self, indent=1): self.codeText += "\n\ \n meta_data['training'] = {}\ \n meta_data['training']['features'] = features\ \n scoring = config['scoring_criteria']\ \n tags = {'estimator_name': model_name}\ \n monitoring_data = read_json(targetPath/IOFiles['monitor'])\ \n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\ \n mlflow_client, experiment_id = mlflow_create_experiment(monitoring_data.get('mlflow_config',mlflow_default_config), targetPath, usecase)\ \n run_id = save_model(experiment_id, estimator,features, metrices,best_params,tags,scoring)\ \n write_json(meta_data, targetPath/IOFiles['metaDataOutput'])\ \n write_json({'scoring_criteria': scoring, 'metrices':metrices, 'param':best_params}, targetPath/IOFiles['performance'])\ \n\ \n # return status\ \n status = {'Status':'Success','mlflow_run_id':run_id,'FeaturesUsed':features,'test_score':metrices['test_score'],'train_score':metrices['train_score']}\ \n log.info(f'Test score: {test_score}')\ \n log.info(f'Train score: {train_score}')\ \n log.info(f'MLflow run id: {run_id}')\ \n log.info(f'output: {status}')\ \n return json.dumps(status)" def getMainCodeModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'sys'} ,{'module':'json'} ,{'module':'logging'} ] return modules def addMainCode(self, indent=1): self.codeText += "\n\ \nif __name__ == '__main__':\ \n log = None\ \n try:\ \n print(train(log))\ \n except Exception as e:\ \n if log:\ \n log.error(e, exc_info=True)\ \n status = {'Status':'Failure','Message':str(e)}\ \n print(json.dumps(status))\ " def add_variable(self, name, value, indent=1): if isinstance(value, str): self.codeText += f"\n{self.tab * indent}{name} = '{value}'" else: self.codeText += f"\n{self.tab * indent}{name} = {value}" def addStatement(self, statement, indent=1): self.codeText += f"\n{self.tab * indent}{statement}" def add_search_space_w(self, algoritms): for model, params in algoritms.items(): d = {'clf': f"[{model}()]"} for k,v in params.items(): if isinstance(v, str): d[f'clf__{k}']=f"'{v}'" else: d[f'clf__{k}']= f"{v}" self.search_space.append(d) def add_search_space(self, indent=1): self.codeText += f"\n{self.tab}search_space = config['search_space']" def add_train_test_split(self, train_feature, target_feature,test_ratio, indent=1): self.codeText += "\n\n # split the data for training\ \n selected_features = prev_step_output['selected_features']\ \n target_feature = config['target_feature']\ \n train_features = prev_step_output['total_features'].copy()\ \n train_features.remove(target_feature)\ \n X_train = df[train_features]\ \n y_train = df[target_feature]\ \n if config['test_ratio'] > 0.0:\ \n test_data = read_data(targetPath/IOFiles['testData'])\ \n X_test = test_data[train_features]\ \n y_test = test_data[target_feature]\ \n else:\ \n X_test = pd.DataFrame()\ \n y_test = pd.DataFrame()" def add_model_fit(self, estimator, optimizer, selector_method, importer, indent=1): # need to adjust the 
indent importer.addModule('importlib') importer.addModule('operator') text = f"\n features = selected_features['{selector_method}']\ \n estimator = {estimator}()\ \n param = config['algorithms']['{estimator}']" if optimizer == 'GridSearchCV': text += "\n grid = GridSearchCV(estimator, param,cv=config['optimization_param']['trainTestCVSplit'])\ \n grid.fit(X_train[features], y_train)\ \n train_score = grid.best_score_ * 100\ \n best_params = grid.best_params_\ \n estimator = grid.best_estimator_" elif optimizer == 'GeneticSelectionCV': text += "\n grid = GeneticSelectionCV(estimator, scoring=scorer, n_generations=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'],n_population=config['optimization_param']['geneticparams']['n_population'],crossover_proba=config['optimization_param']['geneticparams']['crossover_proba'],mutation_proba=config['optimization_param']['geneticparams']['mutation_proba'],crossover_independent_proba=config['optimization_param']['geneticparams']['crossover_independent_proba'],mutation_independent_proba=config['optimization_param']['geneticparams']['mutation_independent_proba'],tournament_size=config['optimization_param']['geneticparams']['tournament_size'],n_gen_no_change=config['optimization_param']['geneticparams']['n_gen_no_change'])\ \n grid.fit(X_train[features], y_train)\ \n train_score = grid.score(X_train[features], y_train)\ \n best_params = grid.estimator_.get_params()\ \n estimator = grid.estimator_" else: text += f"\n grid = {optimizer}(estimator, param, scoring=scorer, n_iter=config['optimization_param']['iterations'],cv=config['optimization_param']['trainTestCVSplit'])\ \n grid.fit(X_train[features], y_train)\ \n train_score = grid.best_score_ * 100\ \n best_params = grid.best_params_\ \n estimator = grid.best_estimator_" self.codeText += text def addLearner(self, model_name, params, importer, indent=1): importer.addModule('Pipeline', mod_from='sklearn.pipeline') importer.addModule('ColumnTransformer', mod_from='sklearn.compose') importer.addModule('confusion_matrix', mod_from='sklearn.metrics') model_params = [] for k,v in params.items(): if isinstance(v, str): model_params.append(f"{k}='{v}'") else: model_params.append(f"{k}={v}") model_params = ",".join(model_params) self.codeText += self.getTransformer() text = f"\n{self.tab * indent}pipeline = Pipeline(steps = [('preprocessor', preprocessor),('learner',{model_name}({model_params}))])" self.codeText += text self.codeText += self.splitTargetFeature(importer) if self.balancing: self.codeText += self.balancingCode(importer) self.codeText += self.fitModelCode(importer) def splitTargetFeature(self, importer, indent=1): importer.addModule('train_test_split', mod_from='sklearn.model_selection') return f"\n{self.tab * indent}target = df['{self.target_feature}']\ \n{self.tab * indent}df = df.drop(['{self.target_feature}'], axis=1)\ \n{self.tab * indent}X_train, X_test, y_train, y_test = train_test_split(df,target, train_size = percentage/100.0)" def getCode_remove(self, model_name=None, indent=1): return self.codeText def getDFName(self): return self.df_name def copyCode(self, learner): self.codeText = learner.getCode()
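A hedged sketch of the call order that assembles the generated train() function. The orchestration is not part of this file, so the sequence below, and the omission of add_model_fit() (which needs an estimator name, an optimizer and an importer helper), are assumptions for illustration:

from trainer import learner   # assumes the module is importable by its file name

gen = learner(problem_type='classification', target_feature='target')
gen.addLocalFunctionsCode()                  # validateConfig() and save_model() helpers
gen.addPrefixCode()                          # config, meta-data and data loading inside train()
gen.add_train_test_split(None, None, None)   # split block; the arguments are unused by the method body
# gen.add_model_fit(estimator, optimizer, selector_method, importer)  # requires an importer helper; omitted here
gen.addSuffixCode()                          # mlflow logging and the status dictionary
gen.addMainCode()                            # __main__ guard
print(gen.getCode()[:200])                   # generated source text, still without its import lines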
selector.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class selector(): def __init__(self, indent=0, tab_size=4): self.tab = " "*tab_size self.codeText = f"\n\ndef featureSelector(log):" self.pipe = 'pipe' self.code_generated = False self.input_files = {} self.output_files = {} self.function_code = '' self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n return config" return text def addMainCode(self): self.codeText += "\n\ \nif __name__ == '__main__':\ \n log = None\ \n try:\ \n print(featureSelector(log))\ \n except Exception as e:\ \n if log:\ \n log.error(e, exc_info=True)\ \n status = {'Status':'Failure','Message':str(e)}\ \n print(json.dumps(status))\ " def addValidateConfigCode(self, indent=1): self.function_code += self.__addValidateConfigCode() def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): return self.function_code + '\n' + self.codeText def addLocalFunctionsCode(self): self.addValidateConfigCode() def getPrefixModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas', 'mod_as':'pd'} ] return modules def addPrefixCode(self, indent=1): self.codeText += "\ \n config = validateConfig()\ \n targetPath = Path('aion')/config['targetPath']\ \n if not targetPath.exists():\ \n raise ValueError(f'targetPath does not exist')\ \n meta_data_file = targetPath/IOFiles['metaData']\ \n if meta_data_file.exists():\ \n meta_data = read_json(meta_data_file)\ \n else:\ \n raise ValueError(f'Configuration file not found: {meta_data_file}')\ \n log_file = targetPath/IOFiles['log']\ \n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\ \n dataLoc = targetPath/IOFiles['inputData']\ \n if not dataLoc.exists():\ \n return {'Status':'Failure','Message':'Data location does not exists.'}\ \n\ \n status = dict()\ \n df = pd.read_csv(dataLoc)\ \n 
prev_step_output = meta_data['transformation']" def getSuffixModules(self): modules = [{'module':'platform'} ,{'module':'time'} ] return modules def addSuffixCode(self, indent=1): self.codeText += "\n\ \n csv_path = str(targetPath/IOFiles['outputData'])\ \n write_data(df, csv_path,index=False)\ \n status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'total_features':total_features, 'selected_features':selected_features}\ \n log.info(f'Selected data saved at {csv_path}')\ \n meta_data['featureengineering']['Status'] = status\ \n write_json(meta_data, str(targetPath/IOFiles['metaData']))\ \n log.info(f'output: {status}')\ \n return json.dumps(status)" def getMainCodeModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'sys'} ,{'module':'json'} ,{'module':'logging'} ,{'module':'argparse'} ] return modules def add_variable(self, name, value, indent=1): if isinstance(value, str): self.codeText += f"\n{self.tab * indent}{name} = '{value}'" else: self.codeText += f"\n{self.tab * indent}{name} = {value}" def addStatement(self, statement, indent=1): self.codeText += f"\n{self.tab * indent}{statement}" def modelBased(self, problem_type, indent=1): if problem_type == 'classification': self.codeText += f"\n{self.tab * indent}selector = SelectFromModel(ExtraTreesClassifier())" self.codeText += f"\n{self.tab * indent}selector()" if problem_type == 'regression': self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', SelectFromModel(Lasso()))])" self.codeText += f"\n{self.tab * indent}selector.fit(df[train_features],df[target_feature])" self.codeText += f"\n{self.tab * indent}selected_features = [x for x,y in zip(train_features, selector.get_support()) if y]" self.codeText += f"\n{self.tab * indent}df = df[selected_features + [target_feature]]" def featureReductionBased(self, reducer, n_components, indent=1): if reducer == 'pca': if n_components == 0: self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components='mle',svd_solver = 'full'))])" elif n_components < 1: self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components={n_components},svd_solver = 'full'))])" else: self.codeText += f"\n{self.tab * indent}pipe = Pipeline([('selector', PCA(n_components=int({n_components})))])" self.codeText += "pipe.fit_transform(df)" def getPipe(self): return self.pipe
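

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): one possible way
# the pipeline-generation framework could drive the selector generator above.
# The call order and the 'regression' choice are assumptions; the real framework
# also prepends generated imports and utility helpers (read_json, logger, ...),
# which are not reproduced here.
if __name__ == '__main__':
    sel = selector()
    sel.addLocalFunctionsCode()      # emits the validateConfig() helper
    sel.addPrefixCode()              # config loading, logging and data-read boilerplate
    sel.modelBased('regression')     # SelectFromModel(Lasso()) based selection snippet
    sel.addSuffixCode()              # persist selected data and update meta data
    sel.addMainCode()                # __main__ entry point of the generated script
    generated_script = sel.getInputOutputFiles() + sel.getCode()
    print(generated_script)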
utility.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .imports import importModule utility_functions = { 'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], } #TODO convert read and write functions in to class functions functions_code = { 'read_json':{'imports':[{'mod':'json'}],'code':"\n\ \ndef read_json(file_path):\ \n data = None\ \n with open(file_path,'r') as f:\ \n data = json.load(f)\ \n return data\ \n"}, 'write_json':{'imports':[{'mod':'json'}],'code':"\n\ \ndef write_json(data, file_path):\ \n with open(file_path,'w') as f:\ \n json.dump(data, f)\ \n"}, 'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\ \ndef read_data(file_path, encoding='utf-8', sep=','):\ \n return pd.read_csv(file_path, encoding=encoding, sep=sep)\ \n"}, 'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\ \ndef write_data(data, file_path, index=False):\ \n return data.to_csv(file_path, index=index)\ \n\ \n#Uncomment and change below code for google storage\ \n#from google.cloud import storage\ \n#def write_data(data, file_path, index=False):\ \n# file_name= file_path.name\ \n# data.to_csv('output_data.csv')\ \n# storage_client = storage.Client()\ \n# bucket = storage_client.bucket('aion_data')\ \n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\ \n# return data\ \n"}, 'is_file_name_url':{'imports':[],'code':"\n\ \ndef is_file_name_url(file_name):\ \n supported_urls_starts_with = ('gs://','https://','http://')\ \n return file_name.startswith(supported_urls_starts_with)\ \n"}, 'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':"\n\ \nclass logger():\ \n #setup the logger\ \n def __init__(self, log_file, mode='w', logger_name=None):\ \n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\ \n self.log = logging.getLogger(logger_name)\ \n\ \n #get logger\ \n def getLogger(self):\ \n return self.log\ \n\ \n def info(self, msg):\ \n self.log.info(msg)\ \n\ \n def error(self, msg, exc_info=False):\ \n self.log.error(msg,exc_info)\ \n\ \n # format and log dataframe\ \n def log_dataframe(self, df, rows=2, msg=None):\ \n buffer = io.StringIO()\ \n df.info(buf=buffer)\ \n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\ \n log_text += '\\n\\t'+str(df.head(rows)).replace('\\n','\\n\\t')\ \n log_text 
+= ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\ \n self.log.info(log_text)\ \n"}, } class utility_function(): def __init__(self, module): if module in utility_functions.keys(): self.module_name = module else: self.module_name = None self.importer = importModule() self.codeText = "" def get_code(self): code = "" if self.module_name: functions = utility_functions[self.module_name] for function in functions: self.codeText += self.get_function_code(function) code = self.importer.getCode() code += self.codeText return code def get_function_code(self, name): code = "" if name in functions_code.keys(): code += functions_code[name]['code'] if self.importer: if 'imports' in functions_code[name].keys(): for module in functions_code[name]['imports']: mod_name = module['mod'] mod_from = module.get('mod_from', None) mod_as = module.get('mod_as', None) self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) return code def get_importer(self): return self.importer if __name__ == '__main__': obj = utility_function('load_data') p = obj.get_utility_code() print(p)
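
    # Illustrative extension (not part of the original sources): the importer that was
    # populated while generating the utility functions can also emit pinned requirements
    # entries; whether the framework uses it exactly this way is an assumption (it needs
    # the third-party packages, e.g. pandas, to be installed).
    print(obj.get_importer().getBaseModule())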
drift_analysis.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class drift(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.codeText = '' def getInputFiles(self): IOFiles = { "log": "aion.log", "trainingData":"rawData.dat", "production": "production.json", "monitoring":"monitoring.json", "prodData": "prodData", "prodDataGT":"prodDataGT" } text = 'IOFiles = ' if not IOFiles: text += '{ }' else: text += json.dumps(IOFiles, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): return self.codeText # temporary code def get_input_drift_import_modules(self): return [ {'module': 'sys', 'mod_from': None, 'mod_as': None}, {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'mlflow', 'mod_from': None, 'mod_as': None}, {'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'warnings', 'mod_from': None, 'mod_as': None}, {'module': 'platform', 'mod_from': None, 'mod_as': None } ] def get_input_drift_code(self): return """ class inputdrift(): def __init__(self,base_config): if 'mlflowURL' in base_config: self.usecase = base_config['modelName'] + '_' + base_config['modelVersion'] self.currentDataLocation = base_config['currentDataLocation'] home = Path.home() if platform.system() == 'Windows': from pathlib import WindowsPath output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data' output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase else: from pathlib import PosixPath output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data' output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase if not output_model_dir.exists(): raise ValueError(f'Configuration file not found at {output_model_dir}') tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns') registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db') mlflow.set_tracking_uri(tracking_uri) mlflow.set_registry_uri(registry_uri) client = mlflow.tracking.MlflowClient( tracking_uri=tracking_uri, registry_uri=registry_uri, ) model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase) model = mlflow.pyfunc.load_model(model_version_uri) run = client.get_run(model.metadata.run_id) if run.info.artifact_uri.startswith('file:'): artifact_path = Path(run.info.artifact_uri[len('file:///') : ]) else: artifact_path = Path(run.info.artifact_uri) self.trainingDataPath = artifact_path/(self.usecase + '_data.csv') def get_input_drift(self,current_data, historical_data): curr_num_feat = current_data.select_dtypes(include='number') hist_num_feat = historical_data.select_dtypes(include='number') num_features = [feat for feat in 
historical_data.columns if feat in curr_num_feat] alert_count = 0 data = { 'current':{'data':current_data}, 'hist': {'data': historical_data} } dist_changed_columns = [] dist_change_message = [] for feature in num_features: curr_static_value = round(st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue,3) if (curr_static_value < 0.05): try: distribution = {} distribution['hist'] = self.DistributionFinder( historical_data[feature]) distribution['curr'] = self.DistributionFinder( current_data[feature]) if(distribution['hist']['name'] == distribution['curr']['name']): pass else: alert_count = alert_count + 1 dist_changed_columns.append(feature) changed_column = {} changed_column['Feature'] = feature changed_column['KS_Training'] = curr_static_value changed_column['Training_Distribution'] = distribution['hist']['name'] changed_column['New_Distribution'] = distribution['curr']['name'] dist_change_message.append(changed_column) except: pass if alert_count: resultStatus = dist_change_message else : resultStatus='Model is working as expected' return(alert_count, resultStatus) def DistributionFinder(self,data): best_distribution ='' best_sse =0.0 if(data.dtype in ['int','int64']): distributions= {'bernoulli':{'algo':st.bernoulli}, 'binom':{'algo':st.binom}, 'geom':{'algo':st.geom}, 'nbinom':{'algo':st.nbinom}, 'poisson':{'algo':st.poisson} } index, counts = np.unique(data.astype(int),return_counts=True) if(len(index)>=2): best_sse = np.inf y1=[] total=sum(counts) mean=float(sum(index*counts))/total variance=float((sum(index**2*counts) -total*mean**2))/(total-1) dispersion=mean/float(variance) theta=1/float(dispersion) r=mean*(float(theta)/1-theta) for j in counts: y1.append(float(j)/total) distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean) distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index)) distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean)) distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r) distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean) sselist = [] for dist in distributions.keys(): distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0)) if np.isnan(distributions[dist]['sess']): distributions[dist]['sess'] = float('inf') best_dist = min(distributions, key=lambda v: distributions[v]['sess']) best_distribution = best_dist best_sse = distributions[best_dist]['sess'] elif (len(index) == 1): best_distribution = 'Constant Data-No Distribution' best_sse = 0.0 elif(data.dtype in ['float64','float32']): distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta] best_distribution = st.norm.name best_sse = np.inf nrange = data.max() - data.min() y, x = np.histogram(data.astype(float), bins='auto', density=True) x = (x + np.roll(x, -1))[:-1] / 2.0 for distribution in distributions: with warnings.catch_warnings(): warnings.filterwarnings('ignore') params = distribution.fit(data.astype(float)) arg = params[:-2] loc = params[-2] scale = params[-1] pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) if( sse < best_sse): best_distribution = distribution.name best_sse = sse return {'name':best_distribution, 'sse': best_sse} def check_drift( config): inputdriftObj = inputdrift(config) historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath,skipinitialspace = True,na_values=['-','?']) 
currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation,skipinitialspace = True,na_values=['-','?']) historicaldataFrame.columns = historicaldataFrame.columns.str.strip() currentdataFrame.columns = currentdataFrame.columns.str.strip() dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame) if message == 'Model is working as expected': output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}} else: output_json = {'status':'SUCCESS','data':{'Affected Columns':message}} return(output_json) """ def get_main_drift_code(self, problem_type, smaller_is_better=True): text = '' if problem_type == 'classification': text += """ def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5): testscore = production['score'] current_score = current_matrices[scoring_criteria] threshold_value = testscore * threshold / 100.0 if current_score > (testscore - threshold_value) : return True else: return False def get_metrices(actual_values, predicted_values): from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score result = {} accuracy_score = accuracy_score(actual_values, predicted_values) avg_precision = precision_score(actual_values, predicted_values, average='macro') avg_recall = recall_score(actual_values, predicted_values, average='macro') avg_f1 = f1_score(actual_values, predicted_values, average='macro') result['accuracy'] = round((accuracy_score*100),2) result['precision'] = round((avg_precision*100),2) result['recall'] = round((avg_recall*100),2) result['f1'] = round((avg_f1*100),2) return result """ else: text += """ def is_drift_within_limits(production, current_matrices,scoring_criteria,threshold = 5): testscore = production['score'] current_score = current_matrices[scoring_criteria] threshold_value = testscore * threshold / 100.0 """ if smaller_is_better: text += """ if current_score < (testscore + threshold_value) :""" else: text += """ if current_score > (testscore - threshold_value) :""" text += """ return True else: return False def get_metrices(actual_values, predicted_values): import numpy as np result = {} me = np.mean(predicted_values - actual_values) sde = np.std(predicted_values - actual_values, ddof = 1) abs_err = np.abs(predicted_values - actual_values) mae = np.mean(abs_err) sdae = np.std(abs_err, ddof = 1) abs_perc_err = 100.0 * np.abs(predicted_values - actual_values) / actual_values mape = np.mean(abs_perc_err) sdape = np.std(abs_perc_err, ddof = 1) result['mean_error'] = me result['mean_abs_error'] = mae result['mean_abs_perc_error'] = mape result['error_std'] = sde result['abs_error_std'] = sdae result['abs_perc_error_std'] = sdape return result """ text += """ def monitoring(config, log=None): targetPath = Path('aion')/config['targetPath'] targetPath.mkdir(parents=True, exist_ok=True) log_file = targetPath/IOFiles['log'] log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) output_json = {} trainingDataLocation = targetPath/IOFiles['trainingData'] monitoring = targetPath/IOFiles['monitoring'] log.info(f'Input Location External: {config["inputUriExternal"]}') trainingStatus = 'False' dataFileLocation = '' driftStatus = 'No Drift' if monitoring.exists(): monitoring_data = read_json(monitoring) if monitoring_data.get('runNo', False): reader = dataReader(reader_type=monitoring_data.get('prod_db_type','sqlite'),target_path=targetPath, 
config=config.get('db_config',None)) production= targetPath/IOFiles['production'] proddataDF = pd.DataFrame() predicted_data = pd.DataFrame() if production.exists(): production = read_json(production) if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']): predicted_data = reader.read(IOFiles['prodData']) actual_data = reader.read(IOFiles['prodDataGT']) common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()] proddataDF = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner') currentPerformance = {} currentPerformance = get_metrices(proddataDF[config['target_feature']], proddataDF['prediction']) if is_drift_within_limits(production, currentPerformance,config['scoring_criteria']): log.info(f'OutputDrift: No output drift found') output_json.update({'outputDrift':'Model score is with in limits'}) else: log.info(f'OutputDrift: Found Output Drift') log.info(f'Original Test Score: {production["score"]}') log.info(f'Current Score: {currentPerformance[config["scoring_criteria"]]}') output_json.update({'outputDrift':{'Meassage': 'Model output is drifted','trainedScore':production["score"], 'currentScore':currentPerformance[config["scoring_criteria"]]}}) trainingStatus = 'True' driftStatus = 'Output Drift' else: if reader.file_exists(IOFiles['prodData']): predicted_data = reader.read(IOFiles['prodData']) log.info(f'OutputDrift: Prod Data not found') output_json.update({'outputDrift':'Prod Data not found'}) else: log.info(f'Last Time pipeline not executed completely') output_json.update({'Msg':'Pipeline is not executed completely'}) trainingStatus = 'True' if config['inputUriExternal']: dataFileLocation = config['inputUriExternal'] elif 's3' in config.keys(): dataFileLocation = 'cloud' else: dataFileLocation = config['inputUri'] if trainingStatus == 'False': historicaldataFrame=pd.read_csv(trainingDataLocation) if config['inputUriExternal']: currentdataFrame=pd.read_csv(config['inputUriExternal']) elif not predicted_data.empty: currentdataFrame = predicted_data.copy() elif 's3' in config.keys(): reader = dataReader(reader_type='s3',target_path=config['targetPath'], config=config['s3']) currentdataFrame = reader.read(config['s3']['file_name']) else: currentdataFrame=pd.read_csv(config['inputUri']) inputdriftObj = inputdrift(config) dataalertcount,inputdrift_message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame) if inputdrift_message == 'Model is working as expected': log.info(f'InputDrift: No input drift found') output_json.update({'Status':'SUCCESS','inputDrift':'Model is working as expected'}) else: log.info(f'InputDrift: Input drift found') log.info(f'Affected Columns {inputdrift_message}') output_json.update({'inputDrift':{'Affected Columns':inputdrift_message}}) trainingStatus = 'True' driftStatus = 'Input Drift' if config['inputUriExternal']: dataFileLocation = config['inputUriExternal'] elif actual_data_path.exists() and predict_data_path.exists(): dataFileLocation = '' elif 's3' in config.keys(): dataFileLocation = 'cloud' else: dataFileLocation = config['inputUri'] else: log.info(f'Pipeline Executing first Time') output_json.update({'Msg':'Pipeline executing first time'}) trainingStatus = 'True' if config['inputUriExternal']: dataFileLocation = config['inputUriExternal'] elif 's3' in config.keys(): dataFileLocation = 'cloud' else: dataFileLocation = config['inputUri'] else: log.info(f'Pipeline Executing first Time') output_json.update({'Msg':'Pipeline executing first time'}) 
trainingStatus = 'True' if config['inputUriExternal']: dataFileLocation = config['inputUriExternal'] elif 's3' in config.keys(): dataFileLocation = 'cloud' else: dataFileLocation = config['inputUri'] if monitoring.exists(): monitoring_data['runNo'] = int(monitoring_data.get('runNo', '0')) + 1 else: monitoring_data = {} monitoring_data['runNo'] = 1 monitoring_data['prod_db_type'] = config.get('prod_db_type', 'sqlite') monitoring_data['db_config'] = config.get('db_config', {}) monitoring_data['mlflow_config'] = config.get('mlflow_config', None) if 's3' in config.keys(): monitoring_data['s3'] = config['s3'] monitoring_data['dataLocation'] = dataFileLocation monitoring_data['driftStatus'] = driftStatus write_json(monitoring_data,targetPath/IOFiles['monitoring']) output = {'Status':'SUCCESS'} output.update(output_json) return(json.dumps(output)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-i', '--inputUri', help='Training Data Location') args = parser.parse_args() config_file = Path(__file__).parent/'config.json' if not Path(config_file).exists(): raise ValueError(f'Config file is missing: {config_file}') config = read_json(config_file) config['inputUriExternal'] = None if args.inputUri: if args.inputUri != '': config['inputUriExternal'] = args.inputUri log = None try: print(monitoring(config, log)) except Exception as e: if log: log.error(e, exc_info=True) status = {'Status':'Failure','Message':str(e)} print(json.dumps(status)) raise Exception(str(e)) """ return text
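

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): one possible way
# the monitoring script could be assembled from the pieces above. The assembly
# order is an assumption, and the generated code still relies on helpers
# (read_json, write_json, logger, dataReader) produced by the utility and
# data_reader generators, which are not reproduced here.
if __name__ == '__main__':
    def _import_line(m):
        line = f"from {m['mod_from']} import {m['module']}" if m['mod_from'] else f"import {m['module']}"
        return line + (f" as {m['mod_as']}" if m['mod_as'] else '')

    gen = drift()
    script = '\n'.join(_import_line(m) for m in gen.get_input_drift_import_modules())
    script += gen.getInputOutputFiles()
    script += gen.get_input_drift_code()
    script += gen.get_main_drift_code('classification')
    print(script)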
data_reader.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .imports import importModule supported_reader = ['sqlite', 'influx','s3'] functions_code = { 'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':""" class dataReader(): def get_reader(self, reader_type, target_path=None, config=None): if reader_type == 'sqlite': return sqlite_writer(target_path=target_path) elif reader_type == 'influx': return Influx_writer(config=config) elif reader_type == 'gcs': return gcs(config=config) elif reader_type == 'azure': return azure(config=config) elif reader_type == 's3': return s3bucket(config=config) else: raise ValueError(reader_type) """ }, 'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':"""\n\ class sqlite_writer(): def __init__(self, target_path): self.target_path = Path(target_path) database_file = self.target_path.stem + '.db' self.db = sqlite_db(self.target_path, database_file) def file_exists(self, file): if file: return self.db.table_exists(file) else: return False def read(self, file): return self.db.read(file) def write(self, data, file): self.db.write(data, file) def close(self): self.db.close() class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem + '.db' db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() self.tables = [] def table_exists(self, name): if name in self.tables: return True elif name: query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() if len(listOfTables) > 0 : self.tables.append(name) return True return False def read(self, table_name): return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def write(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def delete(self, name): pass def close(self): self.conn.close() """ }, 'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""\n\ class Influx_writer(): def __init__(self, config): self.db = influx_db(config) def 
file_exists(self, file): if file: return self.db.table_exists(file) else: return False def read(self, file): query = "SELECT * FROM {}".format(file) if 'read_time' in self.db_config.keys() and self.db_config['read_time']: query += f" time > now() - {self.db_config['read_time']}" return self.db.read(query) def write(self, data, file): self.db.write(data, file) def close(self): pass class influx_db(): def __init__(self, config): self.host = config['host'] self.port = config['port'] self.user = config.get('user', None) self.password = config.get('password', None) self.token = config.get('token', None) self.database = config['database'] self.measurement = config['measurement'] self.tags = config['tags'] self.client = self.get_client() def table_exists(self, name): query = f"SHOW MEASUREMENTS ON {self.database}" result = self.client(query) for measurement in result['measurements']: if measurement['name'] == name: return True return False def read(self, query)->pd.DataFrame: cursor = self.client.query(query) points = cursor.get_points() my_list=list(points) df=pd.DataFrame(my_list) return df def get_client(self): headers = None if self.token: headers={"Authorization": self.token} client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers) databases = client.get_list_database() databases = [x['name'] for x in databases] if self.database not in databases: client.create_database(self.database) return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers) def write(self,data, measurement=None): if isinstance(data, pd.DataFrame): sorted_col = data.columns.tolist() sorted_col.sort() data = data[sorted_col] data = data.to_dict(orient='records') if not measurement: measurement = self.measurement for row in data: if 'time' in row.keys(): p = '%Y-%m-%dT%H:%M:%S.%fZ' time_str = datetime.strptime(row['time'], p) del row['time'] else: time_str = None if 'model_ver' in row.keys(): self.tags['model_ver']= row['model_ver'] del row['model_ver'] json_body = [{ 'measurement': measurement, 'time': time_str, 'tags': self.tags, 'fields': row }] self.client.write_points(json_body) def delete(self, name): pass def close(self): self.client.close() """ }, 's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\ class s3bucket(): def __init__(self, config={}): if 's3' in config.keys(): config = config['s3'] aws_access_key_id = config.get('aws_access_key_id','') aws_secret_access_key = config.get('aws_secret_access_key','') bucket_name = config.get('bucket_name','') if not aws_access_key_id: raise ValueError('aws_access_key_id can not be empty') if not aws_secret_access_key: raise ValueError('aws_secret_access_key can not be empty') self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key)) self.bucket_name = bucket_name def read(self, file_name): try: response = self.client.get_object(Bucket=self.bucket_name, Key=file_name) return pd.read_csv(response['Body']) except ClientError as ex: if ex.response['Error']['Code'] == 'NoSuchBucket': raise ValueError(f"Bucket '{self.bucket_name}' not found in aws s3 storage") elif ex.response['Error']['Code'] == 'NoSuchKey': raise ValueError(f"File '{file_name}' not found in s3 bucket '{self.bucket_name}'") else: raise """ }, 'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 
'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\ def azure(): def __init__(self,config={}): if 'azure' in config.keys(): config = config['azure'] account_name = config.get('account_name','') account_key = config.get('account_key','') container_name = config.get('container_name','') if not account_name: raise ValueError('Account name can not be empty') if not account_key: raise ValueError('Account key can not be empty') if not container_name: raise ValueError('Container name can not be empty') service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", account_name), credential=account_key) self.file_system_client = service_client.get_file_system_client(container_name) def read(self, directory_name): root_dir = str(directory_name) file_paths = self.file_system_client.get_paths(path=root_dir) main_df = pd.DataFrame() for path in file_paths: if not path.is_directory: file_client = file_system_client.get_file_client(path.name) file_ext = Path(path.name).suffix if file_ext in [".csv", ".tsv"]: with open(csv_local, "wb") as my_file: file_client.download_file().readinto(my_file) with open(csv_local, 'r') as file: data = file.read() row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t']) processed_df = pd.read_csv(csv_local, sep=row_delimiter) elif file_ext == ".parquet": stream = io.BytesIO() file_client.download_file().readinto(stream) processed_df = pd.read_parquet(stream, engine='pyarrow') elif file_ext == ".avro": with open(avro_local, "wb") as my_file: file_client.download_file().readinto(my_file) processed_df = pdx.read_avro(avro_local) if main_df.empty: main_df = pd.DataFrame(processed_df) else: main_df = main_df.append(processed_df, ignore_index=True) return main_df """ }, 'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\ class gcs(): def __init__(self, config={}): if 'gcs' in config.keys(): config = config['gcs'] account_key = config.get('account_key','') bucket_name = config.get('bucket_name','') if not account_key: raise ValueError('Account key can not be empty') if not bucket_name: raise ValueError('bucket name can not be empty') storage_client = storage.Client.from_service_account_json(account_key) self.bucket = storage_client.get_bucket(bucket_name) def read(self, bucket_name, file_name): data = self.bucket.blob(file_name).download_as_text() return pd.read_csv(data, encoding = 'utf-8', sep = ',') """ } } class data_reader(): def __init__(self, reader_type=[]): self.supported_readers = supported_reader if isinstance(reader_type, str): self.readers = [reader_type] elif not reader_type: self.readers = self.supported_readers else: self.readers = reader_type unsupported_reader = [ x for x in self.readers if x not in self.supported_readers] if unsupported_reader: raise ValueError(f"reader type '{unsupported_reader}' is not supported\nSupported readers are {self.supported_readers}") self.codeText = "" self.importer = importModule() def get_reader_code(self, readers): reader_code = { 'sqlite': 'return sqlite_writer(target_path=target_path)', 'influx': 'return Influx_writer(config=config)', 'gcs': 'return gcs(config=config)', 'azure': 'return azure(config=config)', 's3': 'return s3bucket(config=config)' } code = "\n\ndef dataReader(reader_type, target_path=None, config=None):\n" for i, reader in 
enumerate(readers): if not i: code += f" if reader_type == '{reader}':\n" else: code += f" elif reader_type == '{reader}':\n" code += f" {reader_code[reader]}\n" if readers: code += " else:\n" code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\n""" else: code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\n""" return code def get_code(self): code = self.get_reader_code(self.readers) functions = [] for reader in self.readers: functions.append(reader) for function in functions: code += self.get_function_code(function) self.codeText += self.importer.getCode() self.codeText += code return self.codeText def get_function_code(self, name): code = "" if name in functions_code.keys(): code += functions_code[name]['code'] if self.importer: if 'imports' in functions_code[name].keys(): for module in functions_code[name]['imports']: mod_name = module['mod'] mod_from = module.get('mod_from', None) mod_as = module.get('mod_as', None) self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) return code def get_importer(self): return self.importer
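

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), following the
# __main__ pattern used in utility.py. The chosen backends are examples only;
# because of the relative import above, this module is normally consumed as
# part of the package rather than executed directly.
if __name__ == '__main__':
    reader_gen = data_reader(['sqlite', 's3'])
    print(reader_gen.get_code())     # dataReader() dispatcher plus the selected reader classes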
imports.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from importlib.metadata import version import sys class importModule(): def __init__(self): self.importModule = {} self.stdlibModule = [] self.localModule = {} def addLocalModule(self,module, mod_from=None, mod_as=None): if module == '*': if module not in self.localModule.keys(): self.localModule[module]= [mod_from] else: self.localModule[module].append(mod_from) elif module not in self.localModule.keys(): self.localModule[module] = {'from':mod_from, 'as':mod_as} def addModule(self, module, mod_from=None, mod_as=None): if module not in self.importModule.keys(): self.importModule[module] = {'from':mod_from, 'as':mod_as} if module in sys.stdlib_module_names: self.stdlibModule.append(module) elif isinstance(self.importModule[module], list): if mod_as not in [x['as'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as not in [x['from'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as != self.importModule[module]['as']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list elif mod_from != self.importModule[module]['from']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list def getModules(self): return (self.importModule, self.stdlibModule) def getBaseModule(self, extra_importers=[]): modules_alias = { 'sklearn':'scikit-learn', 'genetic_selection':'sklearn-genetic', 'google': 'google-cloud-storage', 'azure':'azure-storage-file-datalake'} local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} modules = [] require = "" if extra_importers: extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] importers_module = [self.importModule] + extra_importers for importer_module in importers_module: for k,v in importer_module.items(): if v['from']: mod = v['from'].split('.')[0] else: mod = k if mod in modules_alias.keys(): mod = modules_alias[mod] modules.append(mod) modules = list(set(modules)) for mod in modules: try: if mod in local_modules.keys(): require += f"{local_modules[mod]}\n" else: require += f"{mod}=={version(mod)}\n" except : if mod not in sys.stdlib_module_names: raise return require def getCode(self): def to_string(k, v): mod = '' if v['from']: mod += 'from {} '.format(v['from']) mod += 'import {}'.format(k) if v['as']: mod += ' as {} '.format(v['as']) return mod modules = "" local_modules = "" std_lib_modules = "" third_party_modules = "" for k,v in self.importModule.items(): if k in self.stdlibModule: std_lib_modules = std_lib_modules + '\n' + to_string(k, v) elif isinstance(v, dict): third_party_modules = third_party_modules + '\n' + to_string(k, v) elif isinstance(v, list): for alias in v: third_party_modules = third_party_modules + '\n' + to_string(k, alias) for k,v in self.localModule.items(): if k != '*': local_modules = local_modules + '\n' + 
to_string(k, v) else: for mod_from in v: local_modules = local_modules + '\n' + f'from {mod_from} import {k}' if std_lib_modules: modules = modules + "\n#Standard Library modules" + std_lib_modules if third_party_modules: modules = modules + "\n\n#Third Party modules" + third_party_modules if local_modules: modules = modules + "\n\n#local modules" + local_modules + '\n' return modules def copyCode(self, importer): self.importModule, self.stdlibModule = importer.getModules()
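

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The module names
# are examples only; getBaseModule() resolves installed versions, so it assumes
# the third-party packages (here pandas) are available in the environment.
if __name__ == '__main__':
    imp = importModule()
    imp.addModule('json')                        # standard library
    imp.addModule('pandas', mod_as='pd')         # third party, aliased
    imp.addModule('Path', mod_from='pathlib')    # from-import
    print(imp.getCode())                         # grouped import statements
    print(imp.getBaseModule())                   # pinned requirements entries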
transformer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class transformer(): def __init__(self, indent=0, tab_size=4): self.df_name = 'df' self.tab = ' ' * tab_size self.codeText = "" self.transformers = [] self.TxCols = [] self.imputers = {} self.input_files = {} self.output_files = {} self.function_code = '' self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','trainData' : 'transformedData.dat','testData' : 'test.dat','preprocessor' : 'preprocessor.pkl'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n return config" return text def getPrefixModules(self): modules = [ {'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas', 'mod_as':'pd'} ,{'module':'numpy', 'mod_as':'np'} ,{'module':'scipy'} ] return modules def addPrefixCode(self, indent=1): self.codeText += """ def transformation(log): config = validateConfig() targetPath = Path('aion')/config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') meta_data_file = targetPath/IOFiles['metaData'] if meta_data_file.exists(): meta_data = read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {meta_data_file}') log_file = targetPath/IOFiles['log'] log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) dataLoc = targetPath/IOFiles['inputData'] if not dataLoc.exists(): return {'Status':'Failure','Message':'Data location does not exists.'} status = dict() df = read_data(dataLoc) log.log_dataframe(df) target_feature = config['target_feature'] if config['test_ratio'] == 0.0: train_data = df test_data = pd.DataFrame() else: """ def getSuffixModules(self): modules = [{'module':'pandas','mod_as':'pd'} ,{'module':'json'} ,{'module':'joblib'} ] return modules def addSuffixCode(self,encoder=False, indent=1): self.codeText += """ train_data, preprocess_pipe, label_encoder = profilerObj.transform() if not preprocess_pipe: raise ValueError('Pipeline not created') joblib.dump(preprocess_pipe, targetPath/IOFiles['preprocessor']) 
test_data.reset_index(inplace=True) """ if encoder: self.codeText += """ joblib.dump(label_encoder, targetPath/IOFiles['targetEncoder']) if not test_data.empty: ytest = label_encoder.transform(test_data[target_feature]) """ else: self.codeText += """ if not test_data.empty: ytest = test_data[target_feature] """ self.codeText += """ test_data.astype(profilerObj.train_features_type) test_data = preprocess_pipe.transform(test_data) if isinstance(test_data, scipy.sparse.spmatrix): test_data = test_data.toarray() preprocess_out_columns = train_data.columns.tolist() preprocess_out_columns.remove(target_feature) write_data(train_data,targetPath/IOFiles['trainData'],index=False) if isinstance( test_data, np.ndarray): test_data = pd.DataFrame(test_data, columns=preprocess_out_columns) test_data[target_feature] = ytest write_data(test_data,targetPath/IOFiles['testData'],index=False) log.log_dataframe(train_data) status = {'Status':'Success','trainData':IOFiles['trainData'],'testData':IOFiles['testData']} meta_data['transformation'] = {} meta_data['transformation']['cat_features'] = train_data.select_dtypes('category').columns.tolist() meta_data['transformation']['preprocessor'] = IOFiles['preprocessor'] meta_data['transformation']['preprocess_out_columns'] = preprocess_out_columns """ if encoder: self.codeText += """ meta_data['transformation']['target_encoder'] = IOFiles['targetEncoder'] """ self.codeText += """ meta_data['transformation']['Status'] = status write_json(meta_data, str(targetPath/IOFiles['metaData'])) log.info(f"Transformed data saved at {targetPath/IOFiles['trainData']}") log.info(f'output: {status}') return json.dumps(status) """ def getMainCodeModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'sys'} ,{'module':'json'} ,{'module':'logging'} ,{'module':'argparse'} ] return modules def addMainCode(self, indent=1): self.codeText += "\n\ \nif __name__ == '__main__':\ \n log = None\ \n try:\ \n print(transformation(log))\ \n except Exception as e:\ \n if log:\ \n log.error(e, exc_info=True)\ \n status = {'Status':'Failure','Message':str(e)}\ \n print(json.dumps(status))" def addValidateConfigCode(self, indent=1): self.function_code += self.__addValidateConfigCode() def addLocalFunctionsCode(self): self.addValidateConfigCode() def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self, indent=1): return self.function_code + '\n' + self.codeText def getDFName(self): return self.df_name class data_profiler(): def __init__(self, importer, text_features=False): self.importer = importer self.codeText = "" self.text_features = text_features def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def get_module_import_statement(self, mod): text = "" if not mod.get('module', None): return text if mod.get('mod_from', None): text += f"from {mod['mod_from']} " text += f"import {mod['module']} " if mod.get('mod_as', None): text += f"as {mod['mod_as']}" text += "\n" return text def get_import_modules(self): profiler_importes = [ {'module': 'scipy', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'w2n', 'mod_from': 'word2number', 'mod_as': None}, {'module': 'LabelEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, {'module': 'OrdinalEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, 
{'module': 'OneHotEncoder', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, {'module': 'SimpleImputer', 'mod_from': 'sklearn.impute', 'mod_as': None }, {'module': 'KNNImputer', 'mod_from': 'sklearn.impute', 'mod_as': None }, {'module': 'Pipeline', 'mod_from': 'sklearn.pipeline', 'mod_as': None }, {'module': 'FeatureUnion', 'mod_from': 'sklearn.pipeline', 'mod_as': None }, {'module': 'MinMaxScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, {'module': 'StandardScaler', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, {'module': 'PowerTransformer', 'mod_from': 'sklearn.preprocessing', 'mod_as': None }, {'module': 'ColumnTransformer', 'mod_from': 'sklearn.compose', 'mod_as': None }, {'module': 'TransformerMixin', 'mod_from': 'sklearn.base', 'mod_as': None }, {'module': 'IsolationForest', 'mod_from': 'sklearn.ensemble', 'mod_as': None }, {'module': 'TargetEncoder', 'mod_from': 'category_encoders', 'mod_as': None } ] if self.text_features: profiler_importes.append({'module': 'textProfiler', 'mod_from': 'text.textProfiler', 'mod_as': None }) profiler_importes.append({'module': 'textCombine', 'mod_from': 'text.textProfiler', 'mod_as': None }) return profiler_importes def get_importer(self): return self.importer def get_code(self): common_importes = self.get_import_modules() for module in common_importes: mod_name = module['module'] mod_from = module.get('mod_from', None) mod_as = module.get('mod_as', None) if module['module'] in ['textProfiler','textCombine']: self.importer.addLocalModule(mod_name, mod_from=mod_from, mod_as=mod_as) else: self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) self.codeText += """ STR_TO_CAT_CONVERSION_LEN_MAX = 10 log_suffix = f'[{Path(__file__).stem}] ' target_encoding_method_change = {'targetencoding': 'labelencoding'} supported_method = { 'fillNa': { 'categorical' : ['mode','zero','na'], 'numeric' : ['median','mean','knnimputer','zero','drop','na'], }, 'categoryEncoding': ['labelencoding','targetencoding','onehotencoding','na','none'], 'normalization': ['standardscaler','minmax','lognormal', 'na','none'], 'outlier_column_wise': ['iqr','zscore', 'disable'], 'outlierOperation': ['dropdata', 'average', 'nochange'] } def findiqrOutlier(df): Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))) return index def findzscoreOutlier(df): z = np.abs(scipy.stats.zscore(df)) index = (z < 3) return index def findiforestOutlier(df): isolation_forest = IsolationForest(n_estimators=100) isolation_forest.fit(df) y_pred_train = isolation_forest.predict(df) return y_pred_train == 1 def get_one_true_option(d, default_value=None): if isinstance(d, dict): for k,v in d.items(): if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True): return k return default_value def get_boolean(value): if (isinstance(value, str) and value.lower() == 'true') or (isinstance(value, bool) and value == True): return True else: return False class profiler(): def __init__(self, xtrain, ytrain=None, target=None, encode_target = True, config={}, keep_unprocessed=[], log=None): if not isinstance(xtrain, pd.DataFrame): raise ValueError(f'{log_suffix}supported data type is pandas.DataFrame but provide data is of {type(xtrain)} type') if xtrain.empty: raise ValueError(f'{log_suffix}Data frame is empty') if target and target in xtrain.columns: self.target = xtrain[target] xtrain.drop(target, axis=1, inplace=True) self.target_name = target elif ytrain: self.target = ytrain 
self.target_name = 'target' else: self.target = pd.Series() self.target_name = None self.encode_target = encode_target self.label_encoder = None keep_unprocessed = [x for x in keep_unprocessed if x in xtrain.columns] if keep_unprocessed: self.unprocessed = xtrain[keep_unprocessed] self.data = xtrain.drop(keep_unprocessed, axis=1) else: self.data = xtrain self.unprocessed = pd.DataFrame() self.colm_type = {} for colm, infer_type in zip(self.data.columns, self.data.dtypes): self.colm_type[colm] = infer_type self.numeric_feature = [] self.cat_feature = [] self.text_feature = [] self.wordToNumericFeatures = [] self.added_features = [] self.pipeline = [] self.dropped_features = {} self.train_features_type={} self.__update_type() self.config = config self.featureDict = config.get('featureDict', []) self.output_columns = [] self.feature_expender = [] self.text_to_num = {} if log: self.log = log else: self.log = logging.getLogger('eion') self.type_conversion = {} def log_dataframe(self, msg=None): import io buffer = io.StringIO() self.data.info(buf=buffer) if msg: log_text = f'Data frame after {msg}:' else: log_text = 'Data frame:' log_text += '\\n\\t'+str(self.data.head(2)).replace('\\n','\\n\\t') log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t')) self.log.info(log_text) def transform(self): if self.is_target_available(): if self.target_name: self.log.info(f"Target feature name: '{self.target_name}'") self.log.info(f"Target feature size: {len(self.target)}") else: self.log.info(f"Target feature not present") self.log_dataframe() try: self.process() except Exception as e: self.log.error(e, exc_info=True) raise pipe = FeatureUnion(self.pipeline) self.log.info(pipe) process_data = pipe.fit_transform(self.data, y=self.target) self.update_output_features_names(pipe) if isinstance(process_data, scipy.sparse.spmatrix): process_data = process_data.toarray() df = pd.DataFrame(process_data, columns=self.output_columns) if self.is_target_available() and self.target_name: df[self.target_name] = self.target if not self.unprocessed.empty: df[self.unprocessed.columns] = self.unprocessed self.log_numerical_fill() self.log_categorical_fill() self.log_normalization() return df, pipe, self.label_encoder def log_type_conversion(self): if self.log: self.log.info('----------- Inspecting Features -----------') self.log.info('----------- Type Conversion -----------') count = 0 for k, v in self.type_conversion.items(): if v[0] != v[1]: self.log.info(f'{k} -> from {v[0]} to {v[1]} : {v[2]}') self.log.info('Status:- |... 
Feature inspection done') def check_config(self): removeDuplicate = self.config.get('removeDuplicate', False) self.config['removeDuplicate'] = get_boolean(removeDuplicate) self.config['misValueRatio'] = float(self.config.get('misValueRatio', '1.0')) self.config['numericFeatureRatio'] = float(self.config.get('numericFeatureRatio', '1.0')) self.config['categoryMaxLabel'] = int(self.config.get('categoryMaxLabel', '20')) featureDict = self.config.get('featureDict', []) if isinstance(featureDict, dict): self.config['featureDict'] = [] if isinstance(featureDict, str): self.config['featureDict'] = [] def process(self): #remove duplicate not required at the time of prediction self.check_config() self.remove_constant_feature() self.remove_empty_feature(self.config['misValueRatio']) self.remove_index_features() self.drop_na_target() if self.config['removeDuplicate']: self.drop_duplicate() self.check_categorical_features() self.string_to_numeric() self.process_target() self.train_features_type = dict(zip(self.data.columns, self.data.dtypes)) self.parse_process_step_config() self.process_drop_fillna() #self.log_type_conversion() self.update_num_fill_dict() #print(self.num_fill_method_dict) self.update_cat_fill_dict() self.create_pipeline() self.text_pipeline(self.config) self.apply_outlier() self.log.info(self.process_method) self.log.info(self.train_features_type) def is_target_available(self): return (isinstance(self.target, pd.Series) and not self.target.empty) or len(self.target) def process_target(self, operation='encode', arg=None): if self.encode_target: if self.is_target_available(): self.label_encoder = LabelEncoder() self.target = self.label_encoder.fit_transform(self.target) return self.label_encoder return None def is_target_column(self, column): return column == self.target_name def fill_default_steps(self): num_fill_method = get_one_true_option(self.config.get('numericalFillMethod',None)) normalization_method = get_one_true_option(self.config.get('normalization',None)) for colm in self.numeric_feature: if num_fill_method: self.fill_missing_value_method(colm, num_fill_method.lower()) if normalization_method: self.fill_normalizer_method(colm, normalization_method.lower()) cat_fill_method = get_one_true_option(self.config.get('categoricalFillMethod',None)) cat_encode_method = get_one_true_option(self.config.get('categoryEncoding',None)) for colm in self.cat_feature: if cat_fill_method: self.fill_missing_value_method(colm, cat_fill_method.lower()) if cat_encode_method: self.fill_encoder_value_method(colm, cat_encode_method.lower(), default=True) def parse_process_step_config(self): self.process_method = {} user_provided_data_type = {} for feat_conf in self.featureDict: colm = feat_conf.get('feature', '') if not self.is_target_column(colm): if colm in self.data.columns: user_provided_data_type[colm] = feat_conf['type'] if user_provided_data_type: self.update_user_provided_type(user_provided_data_type) self.fill_default_steps() for feat_conf in self.featureDict: colm = feat_conf.get('feature', '') if not self.is_target_column(colm): if colm in self.data.columns: if feat_conf.get('fillMethod', None): self.fill_missing_value_method(colm, feat_conf['fillMethod'].lower()) if feat_conf.get('categoryEncoding', None): self.fill_encoder_value_method(colm, feat_conf['categoryEncoding'].lower()) if feat_conf.get('normalization', None): self.fill_normalizer_method(colm, feat_conf['normalization'].lower()) if feat_conf.get('outlier', None): self.fill_outlier_method(colm, feat_conf['outlier'].lower()) if 
feat_conf.get('outlierOperation', None): self.fill_outlier_process(colm, feat_conf['outlierOperation'].lower()) def update_output_features_names(self, pipe): columns = self.output_columns start_index = {} for feat_expender in self.feature_expender: if feat_expender: step_name = list(feat_expender.keys())[0] index = list(feat_expender.values())[0] for transformer_step in pipe.transformer_list: if transformer_step[1].steps[-1][0] in step_name: start_index[index] = {transformer_step[1].steps[-1][0]: transformer_step[1].steps[-1][1].get_feature_names()} if start_index: index_shifter = 0 for key,value in start_index.items(): for k,v in value.items(): if k == 'vectorizer': v = [f'{x}_vect' for x in v] key = key + index_shifter self.output_columns[key:key] = v index_shifter += len(v) self.added_features = [*self.added_features, *v] def text_pipeline(self, conf_json): if self.text_feature: pipeList = [] max_features = 2000 text_pipe = Pipeline([ ('selector', ColumnTransformer([ ("selector", "passthrough", self.text_feature) ], remainder="drop")), ("text_fillNa",SimpleImputer(strategy='constant', fill_value='')), ("merge_text_feature", textCombine())]) obj = textProfiler() pipeList = obj.textProfiler(conf_json, pipeList, max_features) last_step = "merge_text_feature" for pipe_elem in pipeList: text_pipe.steps.append((pipe_elem[0], pipe_elem[1])) last_step = pipe_elem[0] text_transformer = ('text_process', text_pipe) self.pipeline.append(text_transformer) self.feature_expender.append({last_step:len(self.output_columns)}) def create_pipeline(self): num_pipe = {} for k,v in self.num_fill_method_dict.items(): for k1,v1 in v.items(): if k1 and k1 != 'none': num_pipe[f'{k}_{k1}'] = Pipeline([ ('selector', ColumnTransformer([ ("selector", "passthrough", v1) ], remainder="drop")), (k, self.get_num_imputer(k)), (k1, self.get_num_scaler(k1)) ]) else: num_pipe[f'{k}_{k1}'] = Pipeline([ ('selector', ColumnTransformer([ ("selector", "passthrough", v1) ], remainder="drop")), (k, self.get_num_imputer(k)) ]) self.output_columns.extend(v1) cat_pipe = {} for k,v in self.cat_fill_method_dict.items(): for k1,v1 in v.items(): cat_pipe[f'{k}_{k1}'] = Pipeline([ ('selector', ColumnTransformer([ ("selector", "passthrough", v1) ], remainder="drop")), (k, self.get_cat_imputer(k)), (k1, self.get_cat_encoder(k1)) ]) if k1 not in ['onehotencoding']: self.output_columns.extend(v1) else: self.feature_expender.append({k1:len(self.output_columns)}) for key, pipe in num_pipe.items(): self.pipeline.append((key, pipe)) for key, pipe in cat_pipe.items(): self.pipeline.append((key, pipe)) if not self.unprocessed.empty: self.pipeline.append(Pipeline([ ('selector', ColumnTransformer([ ("selector", "passthrough", self.unprocessed.columns) ], remainder="drop"))])) "Drop: feature during training but replace with zero during prediction " def process_drop_fillna(self): drop_column = [] if 'numFill' in self.process_method.keys(): for col, method in self.process_method['numFill'].items(): if method == 'drop': self.process_method['numFill'][col] = 'zero' drop_column.append(col) if 'catFill' in self.process_method.keys(): for col, method in self.process_method['catFill'].items(): if method == 'drop': self.process_method['catFill'][col] = 'zero' drop_column.append(col) if drop_column: self.data.dropna(subset=drop_column, inplace=True) def update_num_fill_dict(self): self.num_fill_method_dict = {} if 'numFill' in self.process_method.keys(): for f in supported_method['fillNa']['numeric']: self.num_fill_method_dict[f] = {} for en in 
supported_method['normalization']: self.num_fill_method_dict[f][en] = [] for col in self.numeric_feature: numFillDict = self.process_method.get('numFill',{}) normalizationDict = self.process_method.get('normalization',{}) if f == numFillDict.get(col, '') and en == normalizationDict.get(col,''): self.num_fill_method_dict[f][en].append(col) if not self.num_fill_method_dict[f][en] : del self.num_fill_method_dict[f][en] if not self.num_fill_method_dict[f]: del self.num_fill_method_dict[f] def update_cat_fill_dict(self): self.cat_fill_method_dict = {} if 'catFill' in self.process_method.keys(): for f in supported_method['fillNa']['categorical']: self.cat_fill_method_dict[f] = {} for en in supported_method['categoryEncoding']: self.cat_fill_method_dict[f][en] = [] for col in self.cat_feature: catFillDict = self.process_method.get('catFill',{}) catEncoderDict = self.process_method.get('catEncoder',{}) if f == catFillDict.get(col, '') and en == catEncoderDict.get(col,''): self.cat_fill_method_dict[f][en].append(col) if not self.cat_fill_method_dict[f][en] : del self.cat_fill_method_dict[f][en] if not self.cat_fill_method_dict[f]: del self.cat_fill_method_dict[f] def __update_type(self): self.numeric_feature = self.data.select_dtypes(include='number').columns.tolist() self.cat_feature = self.data.select_dtypes(include='category').columns.tolist() self.date_time = self.data.select_dtypes(include='datetime').columns.tolist() self.text_feature = self.data.select_dtypes(include='object').columns.tolist() def update_user_provided_type(self, data_types): allowed_types = ['numerical','categorical', 'text','date','index'] type_mapping = {'numerical': np.dtype('float'), 'float': np.dtype('float'),'categorical': 'category', 'text':np.dtype('object'),'date':'datetime64[ns]','index': np.dtype('int64'),} mapped_type = {k:type_mapping[v] for k,v in data_types.items()} #self.log.info(mapped_type) self.update_type(mapped_type, 'user provided data type') def get_type(self, as_list=False): if as_list: return [self.colm_type.values()] else: return self.colm_type def update_type(self, data_types={}, reason=''): invalid_features = [x for x in data_types.keys() if x not in self.data.columns] if invalid_features: valid_feat = list(set(data_types.keys()) - set(invalid_features)) valid_feat_type = {k:v for k,v in data_types if k in valid_feat} else: valid_feat_type = data_types for k,v in valid_feat_type.items(): if v != self.colm_type[k].name: try: self.data.astype({k:v}) self.colm_type.update({k:self.data[k].dtype}) self.type_conversion[k] = (self.colm_type[k] , v, 'Done', reason) except: self.type_conversion[k] = (self.colm_type[k] , v, 'Fail', reason) self.data = self.data.astype(valid_feat_type) self.__update_type() def string_to_numeric(self): def to_number(x): try: return w2n.word_to_num(x) except: return np.nan for col in self.text_feature: col_values = self.data[col].copy() col_values = pd.to_numeric(col_values, errors='coerce') if col_values.count() >= (self.config['numericFeatureRatio'] * len(col_values)): self.text_to_num[col] = 'float64' self.wordToNumericFeatures.append(col) if self.text_to_num: columns = list(self.text_to_num.keys()) self.data[columns] = self.data[columns].apply(lambda x: to_number(x)) self.update_type(self.text_to_num) self.log.info('----------- Inspecting Features -----------') for col in self.text_feature: self.log.info(f'-------> Feature : {col}') if col in self.text_to_num: self.log.info('----------> Numeric Status :Yes') self.log.info('----------> Data Type Converting to numeric :Yes') 
else: self.log.info('----------> Numeric Status :No') self.log.info(f'\\nStatus:- |... Feature inspection done for numeric data: {len(self.text_to_num)} feature(s) converted to numeric') self.log.info(f'\\nStatus:- |... Feature word to numeric treatment done: {self.text_to_num}') self.log.info('----------- Inspecting Features End -----------') def check_categorical_features(self): num_data = self.data.select_dtypes(include='number') num_data_unique = num_data.nunique() num_to_cat_col = {} for i, value in enumerate(num_data_unique): if value < self.config['categoryMaxLabel']: num_to_cat_col[num_data_unique.index[i]] = 'category' if num_to_cat_col: self.update_type(num_to_cat_col, 'numerical to categorical') str_to_cat_col = {} str_data = self.data.select_dtypes(include='object') str_data_unique = str_data.nunique() for i, value in enumerate(str_data_unique): if value < self.config['categoryMaxLabel']: str_to_cat_col[str_data_unique.index[i]] = 'category' for colm in str_data.columns: if self.data[colm].str.len().max() < STR_TO_CAT_CONVERSION_LEN_MAX: str_to_cat_col[colm] = 'category' if str_to_cat_col: self.update_type(str_to_cat_col, 'text to categorical') def drop_features(self, features=[], reason='unspecified'): if isinstance(features, str): features = [features] feat_to_remove = [x for x in features if x in self.data.columns] if feat_to_remove: self.data.drop(feat_to_remove, axis=1, inplace=True) for feat in feat_to_remove: self.dropped_features[feat] = reason self.log_drop_feature(feat_to_remove, reason) self.__update_type() def drop_duplicate(self): index = self.data.duplicated(keep='first') if index.sum(): self.remove_rows(index, 'duplicate rows') def drop_na_target(self): if self.is_target_available(): self.remove_rows(self.target.isna(), 'null target values') def log_drop_feature(self, columns, reason): self.log.info(f'---------- Dropping {reason} features ----------') self.log.info(f'\\nStatus:- |... {reason} feature treatment done: {len(columns)} {reason} feature(s) found') self.log.info(f'-------> Drop Features: {columns}') self.log.info(f'Data Frame Shape After Dropping (Rows,Columns): {self.data.shape}') def log_normalization(self): if self.process_method.get('normalization', None): self.log.info(f'\\nStatus:- !... Normalization treatment done') for method in supported_method['normalization']: cols = [] for col, m in self.process_method['normalization'].items(): if m == method: cols.append(col) if cols and method != 'none': self.log.info(f'Running {method} on features: {cols}') def log_numerical_fill(self): if self.process_method.get('numFill', None): self.log.info(f'\\nStatus:- !... Fillna for numeric feature done') for method in supported_method['fillNa']['numeric']: cols = [] for col, m in self.process_method['numFill'].items(): if m == method: cols.append(col) if cols: self.log.info(f'-------> Running {method} on features: {cols}') def log_categorical_fill(self): if self.process_method.get('catFill', None): self.log.info(f'\\nStatus:-!... 
FillNa for categorical feature done') for method in supported_method['fillNa']['categorical']: cols = [] for col, m in self.process_method['catFill'].items(): if m == method: cols.append(col) if cols: self.log.info(f'-------> Running {method} on features: {cols}') def remove_constant_feature(self): unique_values = self.data.nunique() constant_features = [] for i, value in enumerate(unique_values): if value == 1: constant_features.append(unique_values.index[i]) if constant_features: self.drop_features(constant_features, "constant") for i in constant_features: try: self.numeric_feature.remove(i) except ValueError: pass try: self.cat_feature.remove(i) except ValueError: pass def remove_empty_feature(self, misval_ratio=1.0): missing_ratio = self.data.isnull().sum() / len(self.data) missing_ratio = {k:v for k,v in zip(self.data.columns, missing_ratio)} empty_features = [k for k,v in missing_ratio.items() if v > misval_ratio] if empty_features: self.drop_features(empty_features, "empty") for i in empty_features: try: self.numeric_feature.remove(i) except ValueError: pass try: self.cat_feature.remove(i) except: pass def remove_index_features(self): index_feature = [] for feat in self.numeric_feature: if self.data[feat].nunique() == len(self.data): if (self.data[feat].sum()- sum(self.data.index) == (self.data.iloc[0][feat]-self.data.index[0])*len(self.data)): index_feature.append(feat) self.drop_features(index_feature, "index") for i in index_feature: try: self.numeric_feature.remove(i) except ValueError: pass try: self.cat_feature.remove(i) except: pass def fill_missing_value_method(self, colm, method): if colm in self.numeric_feature: if method in supported_method['fillNa']['numeric']: if 'numFill' not in self.process_method.keys(): self.process_method['numFill'] = {} if method == 'na' and self.process_method['numFill'].get(colm, None): pass # don't overwrite else: self.process_method['numFill'][colm] = method if colm in self.cat_feature: if method in supported_method['fillNa']['categorical']: if 'catFill' not in self.process_method.keys(): self.process_method['catFill'] = {} if method == 'na' and self.process_method['catFill'].get(colm, None): pass else: self.process_method['catFill'][colm] = method def check_encoding_method(self, method, colm,default=False): if not self.is_target_available() and (method.lower() == list(target_encoding_method_change.keys())[0]): method = target_encoding_method_change[method.lower()] if default: self.log.info(f"Applying Label encoding instead of Target encoding on feature '{colm}' as target feature is not present") return method def fill_encoder_value_method(self,colm, method, default=False): if colm in self.cat_feature: if method.lower() in supported_method['categoryEncoding']: if 'catEncoder' not in self.process_method.keys(): self.process_method['catEncoder'] = {} if method == 'na' and self.process_method['catEncoder'].get(colm, None): pass else: self.process_method['catEncoder'][colm] = self.check_encoding_method(method, colm,default) else: self.log.info(f"-------> categorical encoding method '{method}' is not supported. 
supported methods are {supported_method['categoryEncoding']}") def fill_normalizer_method(self,colm, method): if colm in self.numeric_feature: if method in supported_method['normalization']: if 'normalization' not in self.process_method.keys(): self.process_method['normalization'] = {} if (method == 'na' or method == 'none') and self.process_method['normalization'].get(colm, None): pass else: self.process_method['normalization'][colm] = method else: self.log.info(f"-------> Normalization method '{method}' is not supported. supported methods are {supported_method['normalization']}") def apply_outlier(self): inlier_indices = np.array([True] * len(self.data)) if self.process_method.get('outlier', None): self.log.info('-------> Feature wise outlier detection:') for k,v in self.process_method['outlier'].items(): if k in self.numeric_feature: if v == 'iqr': index = findiqrOutlier(self.data[k]) elif v == 'zscore': index = findzscoreOutlier(self.data[k]) elif v == 'disable': index = None if k in self.process_method['outlierOperation'].keys(): if self.process_method['outlierOperation'][k] == 'dropdata': inlier_indices = np.logical_and(inlier_indices, index) elif self.process_method['outlierOperation'][k] == 'average': mean = self.data[k].mean() index = ~index self.data.loc[index,[k]] = mean self.log.info(f'-------> {k}: Replaced by Mean {mean}: total replacement {index.sum()}') elif self.process_method['outlierOperation'][k] == 'nochange' and v != 'disable': self.log.info(f'-------> Total outliers in "{k}": {(~index).sum()}') if self.config.get('outlierDetection',None): if self.config['outlierDetection'].get('IsolationForest','False') == 'True': index = findiforestOutlier(self.data[self.numeric_feature]) inlier_indices = np.logical_and(inlier_indices, index) self.log.info(f'-------> Numeric feature based Outlier detection(IsolationForest):') if inlier_indices.sum() != len(self.data): self.remove_rows( inlier_indices == False, 'outlier detection') self.log.info('Status:- |... Outlier treatment done') self.log.info(f'-------> Data Frame Shape After Outlier treatment (Rows,Columns): {self.data.shape}') def remove_rows(self, indices, msg=''): if indices.sum(): indices = ~indices if len(indices) != len(self.data): raise ValueError('Data Frame length mismatch') self.data = self.data[indices] self.data.reset_index(drop=True, inplace=True) if self.is_target_available(): self.target = self.target[indices] if isinstance(self.target, pd.Series): self.target.reset_index(drop=True, inplace=True) if not self.unprocessed.empty: self.unprocessed = self.unprocessed[indices] self.unprocessed.reset_index(drop=True, inplace=True) self.log.info(f'-------> {msg} dropped rows count: {(indices == False).sum()}') def fill_outlier_method(self,colm, method): if colm in self.numeric_feature: if method in supported_method['outlier_column_wise']: if 'outlier' not in self.process_method.keys(): self.process_method['outlier'] = {} if method != 'Disable': self.process_method['outlier'][colm] = method else: self.log.info(f"-------> outlier detection method '{method}' is not supported for column wise. 
supported methods are {supported_method['outlier_column_wise']}") def fill_outlier_process(self,colm, method): if colm in self.numeric_feature: if method in supported_method['outlierOperation']: if 'outlierOperation' not in self.process_method.keys(): self.process_method['outlierOperation'] = {} self.process_method['outlierOperation'][colm] = method else: self.log.info(f"-------> outlier process method '{method}' is not supported for column wise. supported methods are {supported_method['outlieroperation']}") def get_cat_imputer(self,method): if method == 'mode': return SimpleImputer(strategy='most_frequent') elif method == 'zero': return SimpleImputer(strategy='constant', fill_value=0) def get_cat_encoder(self,method): if method == 'labelencoding': return OrdinalEncoder(handle_unknown="error") elif method == 'onehotencoding': return OneHotEncoder(sparse=False,handle_unknown="error") elif method == 'targetencoding': if not self.is_target_available(): raise ValueError('Can not apply Target Encoding when target feature is not present') return TargetEncoder(handle_unknown='error') def get_num_imputer(self,method): if method == 'mode': return SimpleImputer(strategy='most_frequent') elif method == 'mean': return SimpleImputer(strategy='mean') elif method == 'median': return SimpleImputer(strategy='median') elif method == 'knnimputer': return KNNImputer() elif method == 'zero': return SimpleImputer(strategy='constant', fill_value=0) def get_num_scaler(self,method): if method == 'minmax': return MinMaxScaler() elif method == 'standardscaler': return StandardScaler() elif method == 'lognormal': return PowerTransformer(method='yeo-johnson', standardize=False) """ return self.codeText
functions.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ class global_function(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.codeText = "" self.available_functions = { 'iqr':{'name':'iqrOutlier','code':f"\n\ndef iqrOutlier(df):\ \n{self.tab}Q1 = df.quantile(0.25)\ \n{self.tab}Q3 = df.quantile(0.75)\ \n{self.tab}IQR = Q3 - Q1\ \n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\ \n{self.tab}return index"}, 'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f"\n\ndef zscoreOutlier(df):\ \n{self.tab}z = numpy.abs(stats.zscore(df))\ \n{self.tab}index = (z < 3).all(axis=1)\ \n{self.tab}return index"}, 'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f"\n\ndef iforestOutlier(df):\ \n{self.tab}from sklearn.ensemble import IsolationForest\ \n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\ \n{self.tab}isolation_forest.fit(df)\ \n{self.tab}y_pred_train = isolation_forest.predict(df)\ \n{self.tab}return y_pred_train == 1"}, 'minMaxImputer':{'name':'minMaxImputer','code':f"\n\nclass minMaxImputer(TransformerMixin):\ \n{self.tab}def __init__(self, strategy='max'):\ \n{self.tab}{self.tab}self.strategy = strategy\ \n{self.tab}def fit(self, X, y=None):\ \n{self.tab}{self.tab}self.feature_names_in_ = X.columns\ \n{self.tab}{self.tab}if self.strategy == 'min':\ \n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\ \n{self.tab}{self.tab}else:\ \n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\ \n{self.tab}{self.tab}return self\ \n{self.tab}def transform(self, X):\ \n{self.tab}{self.tab}import numpy\ \n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)"}, 'DummyEstimator':{'name':'DummyEstimator','code':f"\n\nclass DummyEstimator(BaseEstimator):\ \n{self.tab}def fit(self): pass\ \n{self.tab}def score(self): pass"}, 'start_reducer':{'name':'start_reducer','imports':[{'mod':'itertools'},{'mod':'numpy','mod_as':'np'},{'mod':'pandas','mod_as':'pd'},{'mod':'VarianceThreshold','mod_from':'sklearn.feature_selection'}], 'code':""" def start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05): qconstantColumns = [] train_features = df.columns.tolist() train_features.remove(target_feature) df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature numeric_features = df.select_dtypes(include='number').columns.tolist() non_numeric_features = df.select_dtypes(exclude='number').columns.tolist() if numeric_features and var_threshold: qconstantFilter = VarianceThreshold(threshold=var_threshold) tempDf=df[numeric_features] qconstantFilter.fit(tempDf) qconstantColumns = [column for column in numeric_features if column not in tempDf.columns[qconstantFilter.get_support()]] if target_feature in qconstantColumns: qconstantColumns.remove(target_feature) numeric_features = list(set(numeric_features) - set(qconstantColumns)) if numeric_features: numColPairs = list(itertools.product(numeric_features, numeric_features)) for item in 
numColPairs: if(item[0] == item[1]): numColPairs.remove(item) tempArray = [] for item in numColPairs: tempCorr = np.abs(df[item[0]].corr(df[item[1]])) if(tempCorr > corr_threshold): tempArray.append(item[0]) tempArray = np.unique(tempArray).tolist() nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray)) groupedFeatures = [] if tempArray: corrDic = {} for feature in tempArray: temp = [] for col in tempArray: tempCorr = np.abs(df[feature].corr(df[col])) temp.append(tempCorr) corrDic[feature] = temp #Similar correlation df corrDF = pd.DataFrame(corrDic,index = tempArray) corrDF.loc[:,:] = np.tril(corrDF, k=-1) alreadyIn = set() similarFeatures = [] for col in corrDF: perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist() if perfectCorr and col not in alreadyIn: alreadyIn.update(set(perfectCorr)) perfectCorr.append(col) similarFeatures.append(perfectCorr) updatedSimFeatures = [] for items in similarFeatures: if(target_feature != '' and target_feature in items): for p in items: updatedSimFeatures.append(p) else: updatedSimFeatures.append(items[0]) newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols)) updatedFeatures = list(set(newTempFeatures + non_numeric_features)) else: updatedFeatures = list(set(df.columns) -set(qconstantColumns)) else: updatedFeatures = list(set(df.columns) -set(qconstantColumns)) return updatedFeatures """}, 'feature_importance_class':{'name':'feature_importance_class','code':"\n\ \ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\ \n import pandas as pd\ \n from sklearn.feature_selection import chi2\ \n from sklearn.feature_selection import f_classif\ \n from sklearn.feature_selection import mutual_info_classif\ \n \ \n impFeatures = []\ \n if cat_features:\ \n categoricalData=df[cat_features]\ \n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\ \n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\ \n impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())\ \n if numeric_features:\ \n quantData=df[numeric_features]\ \n fclassScore=f_classif(quantData,df[target_feature])[1]\ \n miClassScore=mutual_info_classif(quantData,df[target_feature])\ \n fClassSeries=pd.Series(fclassScore,index=numeric_features)\ \n miClassSeries=pd.Series(miClassScore,index=numeric_features)\ \n impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())\ \n impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())\ \n pearsonScore=df.corr() \ \n targetPScore=abs(pearsonScore[target_feature])\ \n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\ \n return list(set(sum(impFeatures, [])))"}, 'feature_importance_reg':{'name':'feature_importance_reg','code':"\n\ \ndef feature_importance_reg(df, numeric_features, target_feature,pValTh,corrTh):\ \n import pandas as pd\ \n from sklearn.feature_selection import f_regression\ \n from sklearn.feature_selection import mutual_info_regression\ \n \ \n impFeatures = []\ \n if numeric_features:\ \n quantData =df[numeric_features]\ \n fregScore=f_regression(quantData,df[target_feature])[1]\ \n miregScore=mutual_info_regression(quantData,df[target_feature])\ \n fregSeries=pd.Series(fregScore,index=numeric_features)\ \n miregSeries=pd.Series(miregScore,index=numeric_features)\ \n impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())\ \n impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())\ \n pearsonScore=df.corr()\ \n targetPScore=abs(pearsonScore[target_feature])\ \n 
impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\ \n return list(set(sum(impFeatures, [])))"}, 'scoring_criteria':{'name':'scoring_criteria','imports':[{'mod':'make_scorer','mod_from':'sklearn.metrics'},{'mod':'roc_auc_score','mod_from':'sklearn.metrics'}], 'code':"\n\ \ndef scoring_criteria(score_param, problem_type, class_count):\ \n if problem_type == 'classification':\ \n scorer_mapping = {\ \n 'recall':{'binary_class': 'recall', 'multi_class': 'recall_weighted'},\ \n 'precision':{'binary_class': 'precision', 'multi_class': 'precision_weighted'},\ \n 'f1_score':{'binary_class': 'f1', 'multi_class': 'f1_weighted'},\ \n 'roc_auc':{'binary_class': 'roc_auc', 'multi_class': 'roc_auc_ovr_weighted'}\ \n }\ \n if (score_param.lower() == 'roc_auc') and (class_count > 2):\ \n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\ \n else:\ \n class_type = 'binary_class' if class_count == 2 else 'multi_class'\ \n if score_param in scorer_mapping.keys():\ \n score_param = scorer_mapping[score_param][class_type]\ \n else:\ \n score_param = 'accuracy'\ \n return score_param"}, 'log_dataframe':{'name':'log_dataframe','code':f"\n\ \ndef log_dataframe(df, msg=None):\ \n import io\ \n buffer = io.StringIO()\ \n df.info(buf=buffer)\ \n if msg:\ \n log_text = f'Data frame after {{msg}}:'\ \n else:\ \n log_text = 'Data frame:'\ \n log_text += '\\n\\t'+str(df.head(2)).replace('\\n','\\n\\t')\ \n log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\ \n get_logger().info(log_text)"}, 'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':"\n\ \nclass BayesSearchCV():\ \n\ \n def __init__(self, estimator, params, scoring, n_iter, cv):\ \n self.estimator = estimator\ \n self.params = params\ \n self.scoring = scoring\ \n self.iteration = n_iter\ \n self.cv = cv\ \n self.best_estimator_ = None\ \n self.best_score_ = None\ \n self.best_params_ = None\ \n\ \n def __min_fun(self, params):\ \n score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\ \n acc = score.mean()\ \n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\ \n\ \n def fit(self, X, y):\ \n trials = Trials()\ \n self.X = X\ \n self.y = y\ \n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\ \n result = sorted(trials.results, key = lambda x: x['loss'])[0]\ \n self.best_estimator_ = result['model']\ \n self.best_score_ = result['score']\ \n self.best_params_ = result['params']\ \n self.best_estimator_.fit(X, y)\ \n\ \n def hyperOptParamConversion( paramSpace):\ \n paramDict = {}\ \n for j in list(paramSpace.keys()):\ \n inp = paramSpace[j]\ \n isLog = False\ \n isLin = False\ \n isRan = False\ \n isList = False\ \n isString = False\ \n try:\ \n # check if functions are given as input and reassign paramspace\ \n v = paramSpace[j]\ \n if 'logspace' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isLog = True\ \n elif 'linspace' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isLin = True\ \n elif 'range' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n 
isRan = True\ \n elif 'list' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isList = True\ \n elif '[' and ']' in paramSpace[j]:\ \n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\ \n isList = True\ \n x = paramSpace[j].split(',')\ \n except:\ \n x = paramSpace[j]\ \n str_arg = paramSpace[j]\ \n\ \n # check if arguments are string\ \n try:\ \n test = eval(x[0])\ \n except:\ \n isString = True\ \n\ \n if isString:\ \n paramDict.update({j: hp.choice(j, x)})\ \n else:\ \n res = eval(str_arg)\ \n if isLin:\ \n y = eval('np.linspace' + str(res))\ \n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\ \n elif isLog:\ \n y = eval('np.logspace' + str(res))\ \n paramDict.update(\ \n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\ \n elif isRan:\ \n y = eval('np.arange' + str(res))\ \n paramDict.update({j: hp.choice(j, y)})\ \n # check datatype of argument\ \n elif isinstance(eval(x[0]), bool):\ \n y = list(map(lambda i: eval(i), x))\ \n paramDict.update({j: hp.choice(j, eval(str(y)))})\ \n elif isinstance(eval(x[0]), float):\ \n res = eval(str_arg)\ \n if len(str_arg.split(',')) == 3 and not isList:\ \n y = eval('np.linspace' + str(res))\ \n #print(y)\ \n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\ \n else:\ \n y = list(res) if isinstance(res, tuple) else [res]\ \n paramDict.update({j: hp.choice(j, y)})\ \n else:\ \n res = eval(str_arg)\ \n if len(str_arg.split(',')) == 3 and not isList:\ \n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\ \n else:\ \n y = list(res) if isinstance(res, tuple) else [res]\ \n paramDict.update({j: hp.choice(j, y)})\ \n return paramDict"}, 's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':"\n\ \ndef s2n(value):\ \n try:\ \n x=eval(value)\ \n return x\ \n except:\ \n try:\ \n return w2n.word_to_num(value)\ \n except:\ \n return np.nan"}, 'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':"\n\ \ndef read_json(file_path):\ \n data = None\ \n with open(file_path,'r') as f:\ \n data = json.load(f)\ \n return data\ \n\ \ndef write_json(data, file_path):\ \n with open(file_path,'w') as f:\ \n json.dump(data, f)\ \n\ \ndef read_data(file_path, encoding='utf-8', sep=','):\ \n return pd.read_csv(file_path, encoding=encoding, sep=sep)\ \n\ \ndef write_data(data, file_path, index=False):\ \n return data.to_csv(file_path, index=index)\ \n\ \n#Uncomment and change below code for google storage\ \n#def write_data(data, file_path, index=False):\ \n# file_name= file_path.name\ \n# data.to_csv('output_data.csv')\ \n# storage_client = storage.Client()\ \n# bucket = storage_client.bucket('aion_data')\ \n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\ \n# return data\ \n\ \ndef is_file_name_url(file_name):\ \n supported_urls_starts_with = ('gs://','https://','http://')\ \n return file_name.startswith(supported_urls_starts_with)\ \n"}, 'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f"\n\ \nlog = None\ \ndef set_logger(log_file, mode='a'):\ \n global log\ \n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\ \n log = logging.getLogger(Path(__file__).parent.name)\ \n return log\ \n\ \ndef get_logger():\ \n return log\n"}, 'mlflowSetPath':{'name':'mlflowSetPath','code':f"\n\ndef 
mlflowSetPath(path, name):\ \n{self.tab}db_name = str(Path(path)/'mlruns')\ \n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\ \n{self.tab}mlflow.set_experiment(str(Path(path).name))\ \n"}, 'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f"\n\ndef mlflow_create_experiment(config, path, name):\ \n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\ \n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\ \n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\ \n{self.tab}client = mlflow.tracking.MlflowClient()\ \n{self.tab}experiment = client.get_experiment_by_name(name)\ \n{self.tab}if experiment:\ \n{self.tab}{self.tab}experiment_id = experiment.experiment_id\ \n{self.tab}else:\ \n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\ \n{self.tab}return client, experiment_id\ \n"}, 'get_mlflow_uris':{'name':'get_mlflow_uris','code':f"\n\ndef get_mlflow_uris(config, path):\ \n artifact_uri = None\ \n tracking_uri_type = config.get('tracking_uri_type',None)\ \n if tracking_uri_type == 'localDB':\ \n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\ \n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\ \n tracking_uri = config['tracking_uri']\ \n if config.get('artifacts_uri', None):\ \n if Path(config['artifacts_uri']).exists():\ \n artifact_uri = 'file:' + config['artifacts_uri']\ \n else:\ \n artifact_uri = config['artifacts_uri']\ \n else:\ \n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\ \n else:\ \n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\ \n artifact_uri = None\ \n if config.get('registry_uri', None):\ \n registry_uri = config['registry_uri']\ \n else:\ \n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\ \n return tracking_uri, artifact_uri, registry_uri\ \n"}, 'logMlflow':{'name':'logMlflow','code':f"\n\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\ \n{self.tab}run_id = None\ \n{self.tab}for k,v in params.items():\ \n{self.tab}{self.tab}mlflow.log_param(k, v)\ \n{self.tab}for k,v in metrices.items():\ \n{self.tab}{self.tab}mlflow.log_metric(k, v)\ \n{self.tab}if 'CatBoost' in algoName:\ \n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\ \n{self.tab}else:\ \n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\ \n{self.tab}tags['processed'] = 'no'\ \n{self.tab}tags['registered'] = 'no'\ \n{self.tab}mlflow.set_tags(tags)\ \n{self.tab}if model_info:\ \n{self.tab}{self.tab}run_id = model_info.run_id\ \n{self.tab}return run_id\ \n"}, 'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':"\ndef get_classification_metrices( actual_values, predicted_values):\ \n result = {}\ \n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\ \n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\ \n average='macro')\ \n\ \n result['accuracy'] = math.floor(accuracy_score*10000)/100\ \n result['precision'] = math.floor(avg_precision*10000)/100\ \n result['recall'] = math.floor(avg_recall*10000)/100\ \n result['f1'] = math.floor(avg_f1*10000)/100\ \n return result\ \n"}, 
'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':"\ndef get_regression_metrices( actual_values, predicted_values):\ \n result = {}\ \n\ \n me = np.mean(predicted_values - actual_values)\ \n sde = np.std(predicted_values - actual_values, ddof = 1)\ \n\ \n abs_err = np.abs(predicted_values - actual_values)\ \n mae = np.mean(abs_err)\ \n sdae = np.std(abs_err, ddof = 1)\ \n\ \n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\ \n mape = np.mean(abs_perc_err)\ \n sdape = np.std(abs_perc_err, ddof = 1)\ \n\ \n result['mean_error'] = me\ \n result['mean_abs_error'] = mae\ \n result['mean_abs_perc_error'] = mape\ \n result['error_std'] = sde\ \n result['abs_error_std'] = sdae\ \n result['abs_perc_error_std'] = sdape\ \n return result\ \n"} } def add_function(self, name, importer=None): if name in self.available_functions.keys(): self.codeText += self.available_functions[name]['code'] if importer: if 'imports' in self.available_functions[name].keys(): for module in self.available_functions[name]['imports']: mod_name = module['mod'] mod_from = module.get('mod_from', None) mod_as = module.get('mod_as', None) importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) def get_function_name(self, name): if name in self.available_functions.keys(): return self.available_functions[name]['name'] return None def getCode(self): return self.codeText
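global_function keeps a registry that maps a snippet name to its source text and the imports that text needs; add_function appends the text to the generated module and forwards each import to the importer. A minimal sketch of that registry pattern follows, with a simplified stand-in for the importer rather than the actual AION importModule class.

# Sketch of the snippet-registry pattern: source text plus required imports,
# concatenated on demand. SimpleImporter is a hypothetical stand-in.
class SnippetRegistry:
    def __init__(self):
        self.code_text = ''
        self.snippets = {
            'iqr': {
                'imports': [{'mod': 'pandas', 'mod_as': 'pd'}],
                'code': ("\n\ndef iqrOutlier(df):\n"
                         "    Q1, Q3 = df.quantile(0.25), df.quantile(0.75)\n"
                         "    IQR = Q3 - Q1\n"
                         "    return ~((df < Q1 - 1.5 * IQR) | (df > Q3 + 1.5 * IQR)).any(axis=1)\n"),
            },
        }

    def add_function(self, name, importer=None):
        entry = self.snippets.get(name)
        if entry:
            self.code_text += entry['code']
            if importer:
                for mod in entry.get('imports', []):
                    importer.addModule(mod['mod'], mod_as=mod.get('mod_as'))

class SimpleImporter:
    def __init__(self):
        self.modules = []
    def addModule(self, mod, mod_from=None, mod_as=None):
        self.modules.append((mod, mod_from, mod_as))

registry, importer = SnippetRegistry(), SimpleImporter()
registry.add_function('iqr', importer)
print(importer.modules)    # [('pandas', None, 'pd')]
print(registry.code_text)  # the generated iqrOutlier source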
register.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class register(): def __init__(self, importer, indent=0, tab_size=4): self.tab = " "*tab_size self.codeText = "" self.function_code = "" self.importer = importer self.input_files = {} self.output_files = {} self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','model' : 'model.pkl', 'performance': 'performance.json','production':'production.json','monitor':'monitoring.json'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def code_imports(self): modules = [{'module':'sys'} ,{'module':'json'} ,{'module':'time'} ,{'module':'platform'} ,{'module':'tempfile'} ,{'module':'sqlite3'} ,{'module':'mlflow'} ,{'module':'Path', 'mod_from':'pathlib'} ,{'module':'ViewType', 'mod_from':'mlflow.entities'} ,{'module':'MlflowClient', 'mod_from':'mlflow.tracking'} ,{'module':'ModelVersionStatus', 'mod_from':'mlflow.entities.model_registry.model_version_status'} ] self.import_modules(modules) def import_module(self, module, mod_from=None, mod_as=None): self.importer.addModule(module, mod_from=mod_from, mod_as=mod_as) def import_modules(self, modules): if isinstance(modules, list): for mod in modules: if isinstance(mod, dict): self.importer.addModule(mod['module'], mod_from= mod.get('mod_from', None), mod_as=mod.get('mod_as', None)) def getImportCode(self): return self.importer.getCode() def __addValidateConfigCode(self, models=None): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n return config\ " return text def addLocalFunctionsCode(self, models): self.function_code += self.__addValidateConfigCode(models) def addPrefixCode(self, indent=1): self.code_imports() self.codeText += "\n\ \ndef __merge_logs(log_file_sequence,path, files):\ \n if log_file_sequence['first'] in files:\ \n with open(path/log_file_sequence['first'], 'r') as f:\ \n main_log = f.read()\ \n files.remove(log_file_sequence['first'])\ \n for file in files:\ \n with open(path/file, 'r') as f:\ \n main_log = main_log + f.read()\ \n (path/file).unlink()\ \n with open(path/log_file_sequence['merged'], 'w') as f:\ \n f.write(main_log)\ \n\ \ndef 
merge_log_files(folder, models):\ \n log_file_sequence = {\ \n 'first': 'aion.log',\ \n 'merged': 'aion.log'\ \n }\ \n log_file_suffix = '_aion.log'\ \n log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()]\ \n log_files.append(log_file_sequence['first'])\ \n __merge_logs(log_file_sequence, folder, log_files)\ \n\ \ndef register_model(targetPath,models,usecasename, meta_data):\ \n register = mlflow_register(targetPath, usecasename, meta_data)\ \n register.setup_registration()\ \n\ \n runs_with_score = register.get_unprocessed_runs(models)\ \n best_run = register.get_best_run(runs_with_score)\ \n register.update_unprocessed(runs_with_score)\ \n return register.register_model(models, best_run)\ \n\ \ndef register(log):\ \n config = validateConfig()\ \n targetPath = Path('aion')/config['targetPath']\ \n models = config['models']\ \n merge_log_files(targetPath, models)\ \n meta_data_file = targetPath/IOFiles['metaData']\ \n if meta_data_file.exists():\ \n meta_data = read_json(meta_data_file)\ \n else:\ \n raise ValueError(f'Configuration file not found: {meta_data_file}')\ \n usecase = config['targetPath']\ \n # enable logging\ \n log_file = targetPath/IOFiles['log']\ \n log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)\ \n register_model_name = register_model(targetPath,models,usecase, meta_data)\ \n status = {'Status':'Success','Message':f'Model Registered: {register_model_name}'}\ \n log.info(f'output: {status}')\ \n return json.dumps(status)" def getMainCodeModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'sys'} ,{'module':'os'} ,{'module':'json'} ,{'module':'logging'} ,{'module':'shutil'} ,{'module':'argparse'} ] return modules def addMainCode(self, models, indent=1): self.codeText += "\n\ \nif __name__ == '__main__':\ \n log = None\ \n try:\ \n print(register(log))\ \n except Exception as e:\ \n if log:\ \n log.error(e, exc_info=True)\ \n status = {'Status':'Failure','Message':str(e)}\ \n print(json.dumps(status))" def addStatement(self, statement, indent=1): self.codeText += f"\n{self.tab * indent}{statement}" def query_with_quetes_code(self, decs=True, indent=1): return """\n{first_indentation}def __get_unprocessed_runs_sorted(self): {indentation}query = "tags.processed = 'no'" {indentation}runs = self.client.search_runs( {indentation} experiment_ids=self.experiment_id, {indentation} filter_string=query, {indentation} run_view_type=ViewType.ACTIVE_ONLY, {indentation} order_by=['metrics.test_score {0}'] {indentation}) {indentation}return runs\n""".format('DESC' if decs else 'ASC', first_indentation=indent*self.tab, indentation=(1+indent)*self.tab) def addClassCode(self, smaller_is_better=False): self.codeText += "\ \nclass mlflow_register():\ \n\ \n def __init__(self, input_path, model_name, meta_data):\ \n self.input_path = Path(input_path).resolve()\ \n self.model_name = model_name\ \n self.meta_data = meta_data\ \n self.logger = logging.getLogger('ModelRegister')\ \n self.client = None\ \n self.monitoring_data = read_json(self.input_path/IOFiles['monitor'])\ \n mlflow_default_config = {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}\ \n if not self.monitoring_data.get('mlflow_config',False):\ \n self.monitoring_data['mlflow_config'] = mlflow_default_config\ \n\ \n def setup_registration(self):\ \n tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(self.monitoring_data['mlflow_config'],self.input_path)\ \n self.logger.info(f'MLflow tracking uri: 
{tracking_uri}')\ \n self.logger.info(f'MLflow registry uri: {registry_uri}')\ \n mlflow.set_tracking_uri(tracking_uri)\ \n mlflow.set_registry_uri(registry_uri)\ \n self.client = mlflow.tracking.MlflowClient(\ \n tracking_uri=tracking_uri,\ \n registry_uri=registry_uri,\ \n )\ \n self.experiment_id = self.client.get_experiment_by_name(self.model_name).experiment_id\ \n" self.codeText += self.query_with_quetes_code(smaller_is_better == False) self.codeText += "\ \n def __log_unprocessed_runs(self, runs):\ \n self.logger.info('Unprocessed runs:')\ \n for run in runs:\ \n self.logger.info(' {}: {}'.format(run.info.run_id,run.data.metrics['test_score']))\ \n\ \n def get_unprocessed_runs(self, model_path):\ \n unprocessed_runs = self.__get_unprocessed_runs_sorted()\ \n if not unprocessed_runs:\ \n raise ValueError('Registering fail: No new trained model')\ \n self.__log_unprocessed_runs( unprocessed_runs)\ \n return unprocessed_runs\ \n\ \n def __wait_until_ready(self, model_name, model_version):\ \n client = MlflowClient()\ \n for _ in range(10):\ \n model_version_details = self.client.get_model_version(\ \n name=model_name,\ \n version=model_version,\ \n )\ \n status = ModelVersionStatus.from_string(model_version_details.status)\ \n if status == ModelVersionStatus.READY:\ \n break\ \n time.sleep(1)\ \n\ \n def __create_model(self, run):\ \n artifact_path = 'model'\ \n model_uri = 'runs:/{run_id}/{artifact_path}'.format(run_id=run.info.run_id, artifact_path=artifact_path)\ \n self.logger.info(f'Registering model (run id): {run.info.run_id}')\ \n model_details = mlflow.register_model(model_uri=model_uri, name=self.model_name)\ \n self.__wait_until_ready(model_details.name, model_details.version)\ \n self.client.set_tag(run.info.run_id, 'registered', 'yes' )\ \n state_transition = self.client.transition_model_version_stage(\ \n name=model_details.name,\ \n version=model_details.version,\ \n stage='Production',\ \n )\ \n self.logger.info(state_transition)\ \n return model_details\ \n\ \n def get_best_run(self, models):\ \n return models[0]\ \n\ \n def __validate_config(self):\ \n try:\ \n load_data_loc = self.meta_data['load_data']['Status']['DataFilePath']\ \n except KeyError:\ \n raise ValueError('DataIngestion step output is corrupted')\ \n\ \n def __mlflow_log_transformer_steps(self, best_run):\ \n run_id = best_run.info.run_id\ \n meta_data = read_json(self.input_path/(best_run.data.tags['mlflow.runName']+'_'+IOFiles['metaData']))\ \n self.__validate_config()\ \n with mlflow.start_run(run_id):\ \n if 'transformation' in meta_data.keys():\ \n if 'target_encoder' in meta_data['transformation'].keys():\ \n source_loc = meta_data['transformation']['target_encoder']\ \n mlflow.log_artifact(str(self.input_path/source_loc))\ \n meta_data['transformation']['target_encoder'] = Path(source_loc).name\ \n if 'preprocessor' in meta_data['transformation'].keys():\ \n source_loc = meta_data['transformation']['preprocessor']\ \n mlflow.log_artifact(str(self.input_path/source_loc))\ \n meta_data['transformation']['preprocessor'] = Path(source_loc).name\ \n\ \n write_json(meta_data, self.input_path/IOFiles['metaData'])\ \n mlflow.log_artifact(str(self.input_path/IOFiles['metaData']))\ \n\ \n def __update_processing_tag(self, processed_runs):\ \n self.logger.info('Changing status to processed:')\ \n for run in processed_runs:\ \n self.client.set_tag(run.info.run_id, 'processed', 'yes')\ \n self.logger.info(f' run id: {run.info.run_id}')\ \n\ \n def update_unprocessed(self, runs):\ \n return 
self.__update_processing_tag( runs)\ \n\ \n def __force_register(self, best_run):\ \n self.__create_model( best_run)\ \n self.__mlflow_log_transformer_steps( best_run)\ \n production_json = self.input_path/IOFiles['production']\ \n production_model = {'Model':best_run.data.tags['mlflow.runName'],'runNo':self.monitoring_data['runNo'],'score':best_run.data.metrics['test_score']}\ \n write_json(production_model, production_json)\ \n database_path = self.input_path/(self.input_path.stem + '.db')\ \n if database_path.exists():\ \n database_path.unlink()\ \n return best_run.data.tags['mlflow.runName']\ \n\ \n def __get_register_model_score(self):\ \n reg = self.client.list_registered_models()\ \n if not reg:\ \n return '', 0\ \n run_id = reg[0].latest_versions[0].run_id\ \n run = self.client.get_run(run_id)\ \n score = run.data.metrics['test_score']\ \n return run_id, score\ \n\ \n def register_model(self, models, best_run):\ \n return self.__force_register(best_run)" def local_functions_code(self, smaller_is_better=True, indent=1): if smaller_is_better: min_max = 'min' else: min_max = 'max' self.codeText += "\ndef validate_config(deploy_dict):\ \n try:\ \n load_data_loc = deploy_dict['load_data']['Status']['DataFilePath']\ \n except KeyError:\ \n raise ValueError('DataIngestion step output is corrupted')\ \n\ \ndef get_digest(fname):\ \n import hashlib\ \n hash_algo = hashlib.sha256()\ \n with open(fname, 'rb') as f:\ \n for chunk in iter(lambda: f.read(2 ** 20), b''):\ \n hash_algo.update(chunk)\ \n return hash_algo.hexdigest()\ \n" def getCode(self, indent=1): return self.function_code + '\n' + self.codeText
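The register step generated above drives MLflow: it queries runs still tagged processed = 'no', sorted by test_score, registers the best one, and promotes that version to the Production stage. The sketch below shows the same sequence of MLflow client calls in isolation; the experiment and model name are placeholders, and a configured tracking/registry URI is assumed.

import mlflow
from mlflow.tracking import MlflowClient
from mlflow.entities import ViewType

client = MlflowClient()
experiment = client.get_experiment_by_name('my_usecase')            # placeholder usecase name
runs = client.search_runs(
    experiment_ids=[experiment.experiment_id],
    filter_string="tags.processed = 'no'",
    run_view_type=ViewType.ACTIVE_ONLY,
    order_by=['metrics.test_score DESC'],                           # best score first
)
best_run = runs[0]
model_uri = f'runs:/{best_run.info.run_id}/model'
details = mlflow.register_model(model_uri=model_uri, name='my_usecase')
client.transition_model_version_stage(name=details.name,
                                      version=details.version,
                                      stage='Production')
client.set_tag(best_run.info.run_id, 'processed', 'yes')            # mark run as handled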
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .imports import importModule from .load_data import tabularDataReader from .transformer import transformer as profiler from .transformer import data_profiler from .selector import selector from .trainer import learner from .register import register from .deploy import deploy from .drift_analysis import drift from .functions import global_function from .data_reader import data_reader from .utility import utility_function
load_data.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class tabularDataReader(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.function_code = '' self.codeText = '' self.code_generated = False def getInputFiles(self): IOFiles = { "rawData": "rawData.dat", "metaData" : "modelMetaData.json", "log" : "aion.log", "outputData" : "rawData.dat", "monitoring":"monitoring.json", "prodData": "prodData", "prodDataGT":"prodDataGT" } text = 'IOFiles = ' if not IOFiles: text += '{ }' else: text += json.dumps(IOFiles, indent=4) return text def getOutputFiles(self): output_files = { 'metaData' : 'modelMetaData.json', 'log' : 'aion.log', 'outputData' : 'rawData.dat' } text = 'output_file = ' if not output_files: text += '{ }' else: text += json.dumps(output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n if not config['targetPath']:\ \n raise ValueError(f'Target Path is not configured')\ \n return config" return text def addMainCode(self): self.codeText += "\n\ \nif __name__ == '__main__':\ \n log = None\ \n try:\ \n print(load_data(log))\ \n except Exception as e:\ \n if log:\ \n log.getLogger().error(e, exc_info=True)\ \n status = {'Status':'Failure','Message':str(e)}\ \n print(json.dumps(status))\ \n raise Exception(str(e))\ " def addLoadDataCode(self): self.codeText += """ #This function will read the data and save the data on persistent storage def load_data(log): config = validateConfig() targetPath = Path('aion')/config['targetPath'] targetPath.mkdir(parents=True, exist_ok=True) log_file = targetPath/IOFiles['log'] log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) monitoring = targetPath/IOFiles['monitoring'] if monitoring.exists(): monitoringStatus = read_json(monitoring) if monitoringStatus['dataLocation'] == '' and monitoringStatus['driftStatus'] != 'No Drift': reader = dataReader(reader_type=monitoring_data.get('prod_db_type','sqlite'),target_path=targetPath, config=config.get('db_config',None)) raw_data_location = targetPath/IOFiles['rawData'] if reader.file_exists(IOFiles['prodData']) and reader.file_exists(IOFiles['prodDataGT']): predicted_data = reader.read(IOFiles['prodData']) actual_data = reader.read(IOFiles['prodDataGT']) common_col = [k for k in predicted_data.columns.tolist() if k in actual_data.columns.tolist()] mergedRes = pd.merge(actual_data, predicted_data, on =common_col,how = 'inner') raw_data_path = pd.read_csv(raw_data_location) df = pd.concat([raw_data_path,mergedRes]) else: raise ValueError(f'Prod Data not found') elif monitoringStatus['dataLocation'] == '': raise ValueError(f'Data Location does not exist') else: if 's3' in monitoringStatus.keys(): 
input_reader = dataReader(reader_type='s3',target_path=None, config=monitoringStatus['s3']) log.info(f"Downloading '{monitoringStatus['s3']['file_name']}' from s3 bucket '{monitoringStatus['s3']['bucket_name']}'") df = input_reader.read(monitoringStatus['s3']['file_name']) else: location = monitoringStatus['dataLocation'] log.info(f'Dataset path: {location}') df = read_data(location) else: raise ValueError(f'Monitoring.json does not exist') status = {} output_data_path = targetPath/IOFiles['outputData'] log.log_dataframe(df) required_features = list(set(config['selected_features'] + [config['target_feature']])) log.info('Dataset features required: ' + ','.join(required_features)) missing_features = [x for x in required_features if x not in df.columns.tolist()] if missing_features: raise ValueError(f'Some feature/s is/are missing: {missing_features}') log.info('Removing unused features: '+','.join(list(set(df.columns) - set(required_features)))) df = df[required_features] log.info(f'Required features: {required_features}') try: log.info(f'Saving Dataset: {str(output_data_path)}') write_data(df, output_data_path, index=False) status = {'Status':'Success','DataFilePath':IOFiles['outputData'],'Records':len(df)} except: raise ValueError('Unable to create data file') meta_data_file = targetPath/IOFiles['metaData'] meta_data = dict() meta_data['load_data'] = {} meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if x != config['target_feature']] meta_data['load_data']['Status'] = status write_json(meta_data, meta_data_file) output = json.dumps(status) log.info(output) return output """ def addValidateConfigCode(self, indent=1): self.function_code += self.__addValidateConfigCode() def generateCode(self): self.addValidateConfigCode() self.addLoadDataCode() self.addMainCode() self.code_generated = True def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): if not self.code_generated: self.generateCode() return self.function_code + '\n' + self.codeText
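When drift has been flagged and no fresh dataset is supplied, the generated load_data step rebuilds the training set by joining production predictions with their ground truth on the columns both frames share and appending the result to the original raw data. A toy version of that merge-and-append, with hypothetical column names:

import pandas as pd

raw_data = pd.DataFrame({'id': [1, 2], 'feat': [0.1, 0.2], 'target': [0, 1]})
predicted = pd.DataFrame({'id': [3, 4], 'feat': [0.3, 0.4], 'prediction': [1, 0]})
actual = pd.DataFrame({'id': [3, 4], 'feat': [0.3, 0.4], 'target': [1, 1]})

# Columns present in both production tables drive the inner join
common_cols = [c for c in predicted.columns if c in actual.columns]
merged = pd.merge(actual, predicted, on=common_cols, how='inner')

# Append the labelled production rows to the original training data
training_df = pd.concat([raw_data, merged], ignore_index=True)
print(training_df)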
input_drift.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.ml.core import * from .utility import * def run_input_drift(config): importer = importModule() drifter = input_drift() importer.addModule('sys') importer.addModule('json') importer.addModule('mlflow') importer.addModule('platform') importer.addModule('warnings') importer.addModule('numpy', mod_as='np') importer.addModule('pandas', mod_as='pd') importer.addModule('stats', mod_from='scipy', mod_as='st') importer.addModule('Path', mod_from='pathlib') code = file_header(config['modelName']+'_'+config['modelVersion']) code += importer.getCode() drifter.generateCode() code += drifter.getCode() deploy_path = Path(config["deploy_path"])/'MLaC'/'InputDrift' deploy_path.mkdir(parents=True, exist_ok=True) py_file = deploy_path/"input_drift.py" with open(py_file, "w") as f: f.write(code) req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: f.write(importer.getBaseModule()) create_docker_file('input_drift', deploy_path)
output_drift.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.ml.core import * from .utility import * def run_output_drift(config): importer = importModule() drifter = output_drift(missing = get_variable('fillna', False), word2num_features= get_variable('word2num_features', False), cat_encoder = get_variable('cat_encoder', False),target_encoder = get_variable('target_encoder', False),normalizer = get_variable('normalizer', False),text_profiler = get_variable('text_features', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False),problem_type=config['problem_type']) function = global_function() importer.addModule('sys') importer.addModule('math') importer.addModule('json') importer.addModule('platform') importer.addModule('joblib') importer.addModule('mlflow') importer.addModule('sklearn') importer.addModule('numpy', mod_as='np') importer.addModule('pandas', mod_as='pd') importer.addModule('Path', mod_from='pathlib') importer.addModule('InfluxDBClient', mod_from='influxdb') function.add_function('readWrite') code = file_header(config['modelName']+'_'+config['modelVersion']) code += importer.getCode() code += function.getCode() drifter.generateCode() code += drifter.getCode() deploy_path = Path(config["deploy_path"])/'MLaC'/'OutputDrift' deploy_path.mkdir(parents=True, exist_ok=True) py_file = deploy_path/"output_drift.py" with open(py_file, "w") as f: f.write(code) req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: f.write(importer.getBaseModule()) create_docker_file('output_drift', deploy_path)
deploy.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import shutil from pathlib import Path import json from mlac.ml.core import * from .utility import * import tarfile def add_text_dependency(): return """nltk==3.6.3 textblob==0.15.3 spacy==3.1.3 demoji==1.1.0 bs4==0.0.1 text_unidecode==1.3 contractions==0.1.73 """ def get_deploy_params(config): param_keys = ["modelVersion","problem_type","target_feature"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] data['ipAddress'] = '127.0.0.1' data['portNo'] = '8094' return data def import_trainer_module(importer): non_sklearn_modules = get_variable('non_sklearn_modules') if non_sklearn_modules: for mod in non_sklearn_modules: module = get_module_mapping(mod) mod_from = module.get('mod_from',None) mod_as = module.get('mod_as',None) importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) imported_modules = [ {'module': 'sys', 'mod_from': None, 'mod_as': None}, {'module': 'math', 'mod_from': None, 'mod_as': None}, {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'scipy', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'shutil', 'mod_from': None, 'mod_as': None}, {'module': 'mlflow', 'mod_from': None, 'mod_as': None}, {'module': 'sklearn', 'mod_from': None, 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'argparse', 'mod_from': None, 'mod_as': None}, {'module': 'platform', 'mod_from': None, 'mod_as': None} ] def run_deploy(config): generated_files = [] importer = importModule() deployer = deploy(target_encoder = get_variable('target_encoder', False),feature_reducer = get_variable('feature_reducer', False),score_smaller_is_better = get_variable('smaller_is_better', False)) function = global_function() importModules(importer, imported_modules) if get_variable('cat_encoder', False): importer.addModule('category_encoders') import_trainer_module(importer) if get_variable('word2num_features'): function.add_function('s2n', importer) if get_variable('text_features'): importer.addLocalModule('textProfiler', mod_from='text.textProfiler') usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelServing' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('Prediction') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create the production data reader file importer.addLocalModule('*', mod_from='data_reader') reader_obj = data_reader(['sqlite','influx']) with open(deploy_path/"data_reader.py", 'w') as f: f.write(file_header(usecase) + reader_obj.get_code()) generated_files.append("data_reader.py") # need to copy data profiler from AION code as code is 
splitted and merging code amnnually # can add bugs aion_utilities = Path(__file__).parent.parent.parent.parent / 'utilities' with tarfile.open(aion_utilities / 'text.tar') as file: file.extractall(deploy_path) if (deploy_path / 'utils').exists(): shutil.rmtree(deploy_path / 'utils') with tarfile.open(aion_utilities / 'utils.tar') as file: file.extractall(deploy_path ) generated_files.append("text") generated_files.append("utils") # create empty init file required for creating a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") function.add_function('get_mlflow_uris') code = file_header(usecase) code += importer.getCode() code += deployer.getInputOutputFiles() code += function.getCode() code += deployer.getCode() # create prediction file with open(deploy_path/"predict.py", 'w') as f: f.write(code) generated_files.append("predict.py") # create groundtruth file with open(deploy_path/"groundtruth.py", 'w') as f: f.write(file_header(usecase) + deployer.getGroundtruthCode()) generated_files.append("groundtruth.py") # create create service file with open(deploy_path/"aionCode.py", 'w') as f: f.write(file_header(usecase) + deployer.getServiceCode()) generated_files.append("aionCode.py") importer.addModule('seaborn') # create requirements file req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()]) if config["text_features"]: req += add_text_dependency() f.write(req) generated_files.append("requirements.txt") # create config file config_file = deploy_path/"config.json" config_data = get_deploy_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") # create docker file create_docker_file('Prediction', deploy_path,config['modelName'], generated_files, True if config["text_features"] else False)
trainer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.ml.core import * from .utility import * def get_model_name(algo, method): if method == 'modelBased': return algo + '_' + 'MLBased' if method == 'statisticalBased': return algo + '_' + 'StatisticsBased' else: return algo def get_training_params(config, algo): param_keys = ["modelVersion","problem_type","target_feature","train_features","scoring_criteria","test_ratio","optimization_param"] data = {key:value for (key,value) in config.items() if key in param_keys} data['algorithms'] = {algo: config['algorithms'][algo]} data['targetPath'] = config['modelName'] return data def addImporterLearner(model, importer): module = get_module_mapping(model) mod_from = module.get('mod_from',None) mod_as = module.get('mod_as',None) importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) if not get_variable('non_sklearn_modules'): update_variable('non_sklearn_modules', []) if 'sklearn' not in mod_from: modules = get_variable('non_sklearn_modules') modules.append(model) update_variable('non_sklearn_modules', modules) def addEvaluator(scorer_type, optimizer,trainer, importer): trainer.addStatement("if not X_test.empty:") if optimizer == 'genetic': trainer.addStatement('features = [x for i,x in enumerate(features) if grid.support_[i]]',indent=2) trainer.addStatement('y_pred = estimator.predict(X_test[features])',indent=2) if scorer_type == 'accuracy': importer.addModule('accuracy_score', mod_from='sklearn.metrics') trainer.addStatement(f"test_score = round(accuracy_score(y_test,y_pred),2) * 100",indent=2) importer.addModule('confusion_matrix', mod_from='sklearn.metrics') trainer.addStatement("log.info('Confusion Matrix:')",indent=2) trainer.addStatement("log.info('\\n' + pd.DataFrame(confusion_matrix(y_test,y_pred)).to_string())",indent=2) elif scorer_type == 'recall': importer.addModule('recall_score', mod_from='sklearn.metrics') trainer.addStatement(f"test_score = round(recall_score(y_test,y_pred,average='macro'),2) * 100",indent=2) importer.addModule('confusion_matrix', mod_from='sklearn.metrics') trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=2) trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2) elif scorer_type == 'precision': importer.addModule('precision_score', mod_from='sklearn.metrics') trainer.addStatement(f"test_score = round(precision_score(y_test,y_pred,average='macro'),2) * 100",indent=2) importer.addModule('confusion_matrix', mod_from='sklearn.metrics') trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=2) trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2) elif scorer_type == 'f1_score': importer.addModule('f1_score', mod_from='sklearn.metrics') trainer.addStatement(f"test_score = round(f1_score(y_test,y_pred,average='macro'),2) * 100",indent=2) importer.addModule('confusion_matrix', mod_from='sklearn.metrics') trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=2) 
trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=2) elif scorer_type == 'roc_auc': importer.addModule('roc_auc_score', mod_from='sklearn.metrics') trainer.addStatement("try:") trainer.addStatement(f"test_score = round(roc_auc_score(y_test,y_pred),2) * 100", indent=3) importer.addModule('confusion_matrix', mod_from='sklearn.metrics') trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=3) trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=3) trainer.addStatement("except:") trainer.addStatement("try:",indent=3) trainer.addStatement("actual = pd.get_dummies(y_test)",indent=4) trainer.addStatement("y_pred = pd.get_dummies(y_pred)",indent=4) trainer.addStatement(f"test_score = round(roc_auc_score(y_test,y_pred,average='weighted', multi_class='ovr'),2) * 100", indent=3) trainer.addStatement(f"log.info('Confusion Matrix:\\n')",indent=4) trainer.addStatement(f'log.info(pd.DataFrame(confusion_matrix(y_test,y_pred)))',indent=4) trainer.addStatement("except:",indent=3) trainer.addStatement(f"test_score = 0.0", indent=4) elif scorer_type == 'neg_mean_squared_error' or scorer_type == 'mse': importer.addModule('mean_squared_error', mod_from='sklearn.metrics') trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred),2)',indent=2) update_variable('smaller_is_better', True) elif scorer_type == 'neg_root_mean_squared_error' or scorer_type == 'rmse': importer.addModule('mean_squared_error', mod_from='sklearn.metrics') trainer.addStatement(f'test_score = round(mean_squared_error(y_test,y_pred,squared=False),2)',indent=2) update_variable('smaller_is_better', True) elif scorer_type == 'neg_mean_absolute_error' or scorer_type == 'mae': importer.addModule('mean_absolute_error', mod_from='sklearn.metrics') trainer.addStatement(f'test_score = round(mean_absolute_error(y_test,y_pred),2)',indent=2) update_variable('smaller_is_better', True) elif scorer_type == 'r2': importer.addModule('r2_score', mod_from='sklearn.metrics') trainer.addStatement(f'test_score = round(r2_score(y_test,y_pred),2)',indent=2) def update_search_space(algo, config): search_space = [] algoritms = config["algorithms"] model = algo params = algoritms[model] model_dict = {model:get_module_mapping(model)['mod_from']} d = {'algo': model_dict} d['param'] = params search_space.append(d) config['search_space'] = search_space def get_optimization(optimization, importer, function=None): if optimization == 'grid': importer.addModule('GridSearchCV', mod_from='sklearn.model_selection') optimization = 'GridSearchCV' elif optimization == 'random': importer.addModule('RandomizedSearchCV', mod_from='sklearn.model_selection') optimization = 'RandomizedSearchCV' elif optimization == 'genetic': importer.addModule('GeneticSelectionCV', mod_from='genetic_selection') optimization = 'GeneticSelectionCV' elif optimization == 'bayesopt': optimization = 'BayesSearchCV' function.add_function(optimization,importer) return optimization def scoring_criteria_reg(score_param): scorer_mapping = { 'mse':'neg_mean_squared_error', 'rmse':'neg_root_mean_squared_error', 'mae':'neg_mean_absolute_error', 'r2':'r2' } return scorer_mapping.get(score_param, 'neg_mean_squared_error') def addBalancing(balancingMethod, importer, code): if balancingMethod == 'oversample': importer.addModule('SMOTE', mod_from='imblearn.over_sampling') code.addStatement("\n # data balancing") code.addStatement("X_train, y_train = SMOTE(sampling_strategy='auto', k_neighbors=1, 
random_state=100).fit_resample(X_train, y_train)") if balancingMethod == 'undersample': importer.addModule('TomekLinks', mod_from='imblearn.under_sampling') code.addStatement("\n # data balancing") code.addStatement("X_train, y_train = TomekLinks().fit_resample(X_train, y_train)") def run_trainer(base_config): base_trainer = learner() base_importer = importModule() function = global_function() base_importer.addModule('joblib') base_importer.addModule('warnings') base_importer.addModule('argparse') base_importer.addModule('pandas', mod_as='pd') base_importer.addModule('Path', mod_from='pathlib') function.add_function('get_mlflow_uris') function.add_function('mlflow_create_experiment') importModules(base_importer,base_trainer.getPrefixModules()) base_trainer.addPrefixCode() if base_config["algorithms"]: base_trainer.add_train_test_split('train_features', 'target_feature', "config['test_ratio']") if base_config["problem_type"] == 'classification': if base_config["balancingMethod"]: addBalancing(base_config["balancingMethod"],base_importer,base_trainer) base_trainer.addStatement(f"log.info('Data balancing done')") base_trainer.addStatement("\n #select scorer") if base_config["problem_type"] == 'classification': function.add_function('scoring_criteria', base_importer) base_trainer.addStatement("scorer = scoring_criteria(config['scoring_criteria'],config['problem_type'], df[target_feature].nunique())") else: base_config['scoring_criteria'] = scoring_criteria_reg(base_config['scoring_criteria']) base_trainer.addStatement(f"scorer = config['scoring_criteria']") base_trainer.addStatement(f"log.info('Scoring criteria: {base_config['scoring_criteria']}')") feature_selector = [] if base_config['feature_reducer']: feature_selector.append(base_config['feature_reducer']) elif base_config['feature_selector']: feature_selector = base_config['feature_selector'] for algo in base_config["algorithms"].keys(): for method in feature_selector: trainer = learner() importer = importModule() trainer.copyCode(base_trainer) importer.copyCode(base_importer) config = base_config usecase = config['modelName']+'_'+config['modelVersion'] addImporterLearner(algo, importer) trainer.addStatement("\n #Training model") trainer.addStatement(f"log.info('Training {algo} for {method}')") trainer.add_model_fit(algo, get_optimization(config["optimization"], importer, function), method, importer) trainer.addStatement("\n #model evaluation") addEvaluator(config['scoring_criteria'],config["optimization"], trainer, importer) function.add_function('mlflowSetPath') function.add_function('logMlflow') importModules(importer, trainer.getSuffixModules()) importModules(importer, trainer.getMainCodeModules()) if base_config["problem_type"] == 'classification': function.add_function('classification_metrices', importer) trainer.addStatement("metrices = get_classification_metrices(y_test,y_pred)",indent=2) trainer.add_100_trainsize_code() trainer.addStatement("metrices.update({'train_score': train_score, 'test_score':test_score})") else: function.add_function('regression_metrices', importer) trainer.addStatement("metrices = get_regression_metrices(y_test,y_pred)",indent=2) trainer.add_100_trainsize_code() trainer.addStatement("metrices.update({'train_score': train_score, 'test_score':test_score})") trainer.addSuffixCode() trainer.addMainCode() model_name = get_model_name(algo,method) deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name) deploy_path.mkdir(parents=True, exist_ok=True) generated_files = [] # create the 
utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('train') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = importer.getCode() code += 'warnings.filterwarnings("ignore")\n' code += f"\nmodel_name = '{model_name}'\n" append_variable('models_name',model_name) out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','performance':f'{model_name}_performance.json','metaDataOutput':f'{model_name}_modelMetaData.json'} trainer.addOutputFiles(out_files) code += trainer.getInputOutputFiles() code += function.getCode() trainer.addLocalFunctionsCode() code += trainer.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") with open (deploy_path/"config.json", "w") as f: json.dump(get_training_params(config, algo), f, indent=4) generated_files.append("config.json") create_docker_file('train', deploy_path,config['modelName'], generated_files)
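Because addEvaluator() assembles the evaluation logic as strings, the emitted control flow is easy to lose track of. The snippet below is a hand-written, standalone approximation of what the roc_auc branch generates (a plain binary AUC first, then a weighted one-vs-rest fallback for multi-class targets); it is illustrative rather than the literal generated text, and it scores the fallback against the one-hot frame that the generated statements build as `actual`.

# Hand-written approximation of the evaluation code addEvaluator() emits for
# scorer_type == 'roc_auc'; the generated aionCode.py uses the same metric calls.
import logging
import pandas as pd
from sklearn.metrics import roc_auc_score, confusion_matrix

log = logging.getLogger(__name__)

def roc_auc_test_score(y_test, y_pred):
    try:
        # binary case: score the hard predictions directly
        test_score = round(roc_auc_score(y_test, y_pred), 2) * 100
        log.info('Confusion Matrix:\n%s', pd.DataFrame(confusion_matrix(y_test, y_pred)))
    except Exception:
        try:
            # multi-class fallback: one-hot encode and use weighted one-vs-rest AUC
            actual = pd.get_dummies(y_test)
            scores = pd.get_dummies(y_pred)
            test_score = round(roc_auc_score(actual, scores, average='weighted', multi_class='ovr'), 2) * 100
        except Exception:
            test_score = 0.0
    return test_score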
selector.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json import platform from mlac.ml.core import * from .utility import * output_file_map = { 'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'} } def get_selector_params(config): param_keys = ["modelVersion","problem_type","target_feature","train_features","cat_features","n_components"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_selector(config): select = selector() importer = importModule() function = global_function() importModules(importer,select.getPrefixModules()) select.addPrefixCode() if config["target_feature"] in config["train_features"]: config["train_features"].remove(config["target_feature"]) select.addStatement("train_features = df.columns.tolist()") select.addStatement("target_feature = config['target_feature']") select.addStatement("train_features.remove(target_feature)") select.addStatement("cat_features = prev_step_output['cat_features']") select.add_variable('total_features',[]) select.addStatement("log.log_dataframe(df)") methods = config.get("feature_selector", None) feature_reducer = config.get("feature_reducer", None) select.addStatement("selected_features = {}") select.addStatement("meta_data['featureengineering']= {}") if feature_reducer: update_variable('feature_reducer', True) select.addStatement(f"log.info('Running dimensionality reduction technique( {feature_reducer})')") if feature_reducer == 'pca': importer.addModule('PCA', mod_from='sklearn.decomposition') if int(config["n_components"]) == 0: select.addStatement("dimension_reducer = PCA(n_components='mle',svd_solver = 'full')") elif int(config["n_components"]) < 1: select.addStatement("dimension_reducer = PCA(n_components=config['n_components'],svd_solver = 'full')") else: select.addStatement("dimension_reducer = PCA(n_components=config['n_components'])") elif feature_reducer == 'svd': importer.addModule('TruncatedSVD', mod_from='sklearn.decomposition') if config["n_components"] < 2: config["n_components"] = 2 select.addStatement("dimension_reducer = TruncatedSVD(n_components=config['n_components'], n_iter=7, random_state=42)") elif feature_reducer == 'factoranalysis': importer.addModule('FactorAnalysis', mod_from='sklearn.decomposition') if config["n_components"] == 0: select.addStatement("dimension_reducer = FactorAnalysis()") else: select.addStatement("dimension_reducer = FactorAnalysis(n_components=config['n_components'])") elif feature_reducer == 'ica': importer.addModule('FastICA', mod_from='sklearn.decomposition') if config["n_components"] == 0: select.addStatement("dimension_reducer = FastICA()") else: select.addStatement("dimension_reducer = FastICA(n_components=config['n_components'])") select.addStatement("pca_array = dimension_reducer.fit_transform(df[train_features])") select.addStatement("pca_columns = ['pca_'+str(e) for e in list(range(pca_array.shape[1]))]") select.addStatement("scaledDF = pd.DataFrame(pca_array, columns=pca_columns)") 
select.addStatement("scaledDF[target_feature] = df[target_feature]") select.addStatement("df = scaledDF") select.addStatement(f"selected_features['{feature_reducer}'] = pca_columns") select.addStatement("total_features = df.columns.tolist()") select.addStatement("meta_data['featureengineering']['feature_reducer']= {}") select.addStatement("reducer_file_name = str(targetPath/IOFiles['feature_reducer'])") importer.addModule('joblib') select.addStatement("joblib.dump(dimension_reducer, reducer_file_name)") select.addStatement("meta_data['featureengineering']['feature_reducer']['file']= IOFiles['feature_reducer']") select.addStatement("meta_data['featureengineering']['feature_reducer']['features']= train_features") select.addOutputFiles(output_file_map['feature_reducer']) elif methods: if 'allFeatures' in methods: addDropFeature('target_feature', 'train_features', select) select.addStatement("selected_features['allFeatures'] = train_features") if 'modelBased' in methods: select.addStatement(f"log.info('Model Based Correlation Analysis Start')") select.addStatement("model_based_feat = []") importer.addModule('numpy', mod_as='np') importer.addModule('RFE', mod_from='sklearn.feature_selection') importer.addModule('MinMaxScaler', mod_from='sklearn.preprocessing') if config["problem_type"] == 'classification': importer.addModule('ExtraTreesClassifier', mod_from='sklearn.ensemble') select.addStatement("estimator = ExtraTreesClassifier(n_estimators=100)") else: importer.addModule('Lasso', mod_from='sklearn.linear_model') select.addStatement("estimator = Lasso()") select.addStatement("estimator.fit(df[train_features],df[target_feature])") select.addStatement("rfe = RFE(estimator, n_features_to_select=1, verbose =0 )") select.addStatement("rfe.fit(df[train_features],df[target_feature])") select.addStatement("ranks = MinMaxScaler().fit_transform(-1*np.array([list(map(float, rfe.ranking_))]).T).T[0]") select.addStatement("ranks = list(map(lambda x: round(x,2), ranks))") select.addStatement("for item, rank in zip(df.columns,ranks):") select.addStatement("if rank > 0.30:", indent=2) select.addStatement("model_based_feat.append(item)", indent=3) addDropFeature('target_feature', 'model_based_feat', select) select.addStatement("selected_features['modelBased'] = model_based_feat") select.addStatement(f"log.info(f'Highly Correlated Features : {{model_based_feat}}')") if 'statisticalBased' in methods: select.addStatement(f"log.info('Statistical Based Correlation Analysis Start')") function.add_function('start_reducer',importer) select.addStatement(f"features = start_reducer(df, target_feature, {config['corr_threshold']},{config['var_threshold']})") select.addStatement("train_features = [x for x in features if x in train_features]") select.addStatement("cat_features = [x for x in cat_features if x in features]") select.addStatement("numeric_features = df[features].select_dtypes('number').columns.tolist()") if config["problem_type"] == 'classification': function.add_function('feature_importance_class') select.addStatement(f"statistics_based_feat = feature_importance_class(df[features], numeric_features, cat_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})") else: function.add_function('feature_importance_reg') select.addStatement(f"statistics_based_feat = feature_importance_reg(df[features], numeric_features, target_feature, {config['pValueThreshold']},{config['corr_threshold']})") addDropFeature('target_feature', 'statistics_based_feat', select) 
select.addStatement("selected_features['statisticalBased'] = statistics_based_feat") select.addStatement(f"log.info('Highly Correlated Features : {{statistics_based_feat}}')") select.addStatement("total_features = list(set([x for y in selected_features.values() for x in y] + [target_feature]))") select.addStatement(f"df = df[total_features]") select.addStatement("log.log_dataframe(df)") select.addSuffixCode() importModules(importer, select.getSuffixModules()) importModules(importer, select.getMainCodeModules()) select.addMainCode() generated_files = [] usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('selector') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = file_header(usecase) code += importer.getCode() code += select.getInputOutputFiles() code += function.getCode() select.addLocalFunctionsCode() code += select.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") config_file = deploy_path/"config.json" config_data = get_selector_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") create_docker_file('selector', deploy_path,config['modelName'], generated_files)
utility.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import datetime from pathlib import Path variables = {} def init_variables(): global variables variables = {} def update_variable(name, value): variables[name] = value def get_variable(name, default=None): return variables.get(name, default) def append_variable(name, value): data = get_variable(name) if not data: update_variable(name, [value]) elif not isinstance(data, list): update_variable(name, [data, value]) else: data.append(value) update_variable(name, data) def addDropFeature(feature, features_list, coder, indent=1): coder.addStatement(f'if {feature} in {features_list}:', indent=indent) coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1) def importModules(importer, modules_list): for module in modules_list: mod_from = module.get('mod_from',None) mod_as = module.get('mod_as',None) importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) def file_header(use_case, module_name=None): time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ') text = "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n" return text + f"'''\nThis file is automatically generated by AION for {use_case} usecase.\nFile generation time: {time_str}\n'''" def get_module_mapping(module): mapping = { "LogisticRegression": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'} ,"GaussianNB": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'} ,"DecisionTreeClassifier": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'} ,"SVC": {'module':'SVC', 'mod_from':'sklearn.svm'} ,"KNeighborsClassifier": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'} ,"GradientBoostingClassifier": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'} ,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'} ,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'} ,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'} ,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'} ,"LinearRegression": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'} ,"Lasso": {'module':'Lasso', 'mod_from':'sklearn.linear_model'} ,"Ridge": {'module':'Ridge', 'mod_from':'sklearn.linear_model'} ,"DecisionTreeRegressor": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'} ,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'} ,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'} ,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'} ,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'} } return mapping.get(module, None) def create_docker_file(name, path,usecasename,files=[],text_feature=False): text = "" if name == 'load_data': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip 
install --no-cache-dir -r requirements.txt' elif name == 'transformer': text='FROM python:3.8-slim-buster\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='''RUN \ ''' text+=''' pip install --no-cache-dir -r requirements.txt\ ''' if text_feature: text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\ ''' text+='\n' elif name == 'selector': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 'train': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' text+='COPY requirements.txt requirements.txt' text+='\n' text+='COPY config.json config.json' text+='\n' text+='COPY aionCode.py aionCode.py' text+='\n' text+='COPY utility.py utility.py' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 'register': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 'Prediction': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='''RUN \ ''' text+='''pip install --no-cache-dir -r requirements.txt\ ''' if text_feature: text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\ ''' text+='\n' text+='ENTRYPOINT ["python", "aionCode.py","-ip","0.0.0.0","-pn","8094"]\n' elif name == 'input_drift': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' file_name = Path(path)/'Dockerfile' with open(file_name, 'w') as f: f.write(text)
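The helpers above (file_header, get_module_mapping, create_docker_file and the variables registry) are shared by every generator in this package. The fragment below is a minimal, hypothetical composition of them, a toy component writer rather than anything the repository itself defines; importModule is assumed to come from mlac.ml.core and to expose addModule()/getCode() as it does in the generators above.

# Minimal, hypothetical use of the shared helpers: resolve a model's import
# mapping, register it with an importer, and emit a header plus a Dockerfile.
from pathlib import Path
from mlac.ml.core import importModule          # assumed import, reached via * in the generators
from utility import file_header, get_module_mapping, create_docker_file

def write_toy_component(deploy_path: Path, usecase: str):
    importer = importModule()
    mapping = get_module_mapping('XGBClassifier')    # -> {'module': 'XGBClassifier', 'mod_from': 'xgboost'}
    importer.addModule(mapping['module'], mod_from=mapping.get('mod_from'), mod_as=mapping.get('mod_as'))
    deploy_path.mkdir(parents=True, exist_ok=True)
    with open(deploy_path / 'aionCode.py', 'w') as f:
        f.write(file_header(usecase) + '\n' + importer.getCode())
    # the 'train' flavour copies a fixed file list; other flavours copy the files passed in
    create_docker_file('train', deploy_path, usecase)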
drift_analysis.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.ml.core import * from .utility import * imported_modules = [ {'module': 'sys', 'mod_from': None, 'mod_as': None}, {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'math', 'mod_from': None, 'mod_as': None}, {'module': 'joblib', 'mod_from': None, 'mod_as': None}, {'module': 'mlflow', 'mod_from': None, 'mod_as': None}, {'module': 'sklearn', 'mod_from': None, 'mod_as': None}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'argparse', 'mod_from': None, 'mod_as': None}, {'module': 'stats', 'mod_from': 'scipy', 'mod_as': 'st'}, {'module': 'platform', 'mod_from': None, 'mod_as': None } ] def get_drift_params(config): param_keys = ["modelVersion","problem_type","target_feature","selected_features","scoring_criteria","s3"] data = {key:value for (key,value) in config.items() if key in param_keys} usecase = config['modelName'] data['targetPath'] = usecase if config['dataLocation'] != '': data['inputUri'] = config['dataLocation'] else: data['inputUri'] = '<input datalocation>' data['prod_db_type'] = config.get('prod_db_type', 'sqlite') data['db_config'] = config.get('db_config', {}) data['mlflow_config'] = config.get('mlflow_config', {'artifacts_uri':'','tracking_uri_type':'','tracking_uri':'','registry_uri':''}) return data def run_drift_analysis(config): init_variables() importer = importModule() function = global_function() drifter = drift() importModules(importer, imported_modules) usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelMonitoring' deploy_path.mkdir(parents=True, exist_ok=True) generated_files = [] # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('drift') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create the production data reader file importer.addLocalModule('dataReader', mod_from='data_reader') readers = ['sqlite','influx'] if 's3' in config.keys(): readers.append('s3') reader_obj = data_reader(readers) with open(deploy_path/"data_reader.py", 'w') as f: f.write(file_header(usecase) + reader_obj.get_code()) generated_files.append("data_reader.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") importer.addLocalModule('inputdrift', mod_from='input_drift') code = file_header(usecase) code += importer.getCode() code += drifter.getInputOutputFiles() code += function.getCode() code += drifter.get_main_drift_code(config['problem_type'], get_variable('smaller_is_better', False)) with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") input_drift_importer = importModule() importModules(input_drift_importer, 
drifter.get_input_drift_import_modules()) code = file_header(usecase) code += input_drift_importer.getCode() code += drifter.get_input_drift_code() with open(deploy_path/"input_drift.py", "w") as f: f.write(code) generated_files.append("input_drift.py") with open (deploy_path/"config.json", "w") as f: json.dump(get_drift_params(config), f, indent=4) generated_files.append("config.json") req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: f.write(importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer(), input_drift_importer])) generated_files.append("requirements.txt") create_docker_file('input_drift', deploy_path,config['modelName'], generated_files)
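For orientation, get_drift_params() above fixes the shape of the config.json written into MLaC/ModelMonitoring. Expressed as a Python dict with placeholder values, the generated file looks roughly like this (an optional 's3' block is added only when the source config carries one):

# Illustrative shape of the ModelMonitoring config.json produced by get_drift_params();
# every value is a placeholder, not real project data.
monitoring_config = {
    "targetPath": "demo_usecase",        # taken from config['modelName']
    "modelVersion": "1",
    "problem_type": "classification",
    "target_feature": "label",
    "selected_features": ["feature_1", "feature_2"],
    "scoring_criteria": "accuracy",
    "inputUri": "<input datalocation>",  # used when config['dataLocation'] is empty
    "prod_db_type": "sqlite",
    "db_config": {},
    "mlflow_config": {"artifacts_uri": "", "tracking_uri_type": "", "tracking_uri": "", "registry_uri": ""},
}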
transformer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import shutil from pathlib import Path import json from mlac.ml.core import * from .utility import * import tarfile output_file_map = { 'text' : {'text' : 'text_profiler.pkl'}, 'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'}, 'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'}, 'normalizer' : {'normalizer' : 'normalizer.pkl'} } def add_common_imports(importer): common_importes = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'argparse', 'mod_from': None, 'mod_as': None}, {'module': 'platform', 'mod_from': None, 'mod_as': None } ] for mod in common_importes: importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) def add_text_dependency(): return """nltk==3.6.3 textblob==0.15.3 spacy==3.1.3 demoji==1.1.0 bs4==0.0.1 text_unidecode==1.3 contractions==0.1.73 """ def get_transformer_params(config): param_keys = ["modelVersion","problem_type","target_feature","train_features","text_features","profiler","test_ratio"] #Bugid 13217 data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_transformer(config): transformer = profiler() importer = importModule() function = global_function() importModules(importer, transformer.getPrefixModules()) importer.addModule('warnings') transformer.addPrefixCode() importer.addModule('train_test_split', mod_from='sklearn.model_selection') if config["problem_type"] == 'classification': importer.addModule('LabelEncoder', mod_from='sklearn.preprocessing') transformer.addInputFiles({'targetEncoder':'targetEncoder.pkl'}) update_variable('target_encoder', True) transformer.addStatement("train_data, test_data = train_test_split(df,stratify=df[target_feature],test_size=config['test_ratio'])",indent=2) #Bugid 13217 transformer.addStatement("profilerObj = profiler(xtrain=train_data, target=target_feature, encode_target=True, config=config['profiler'],log=log)") #Bugid 13217 else: transformer.addStatement("train_data, test_data = train_test_split(df,test_size=config['test_ratio'])",indent=2) transformer.addStatement("profilerObj = profiler(xtrain=train_data, target=target_feature, config=config['profiler'],log=log)") importModules(importer, transformer.getSuffixModules()) importModules(importer, transformer.getMainCodeModules()) transformer.addSuffixCode( config["problem_type"] == 'classification') transformer.addMainCode() usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation' deploy_path.mkdir(parents=True, exist_ok=True) generated_files = [] # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('transformer') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file to 
make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") # create the dataProfiler file profiler_importer = importModule() importer.addLocalModule('profiler', mod_from='dataProfiler') profiler_obj = data_profiler(profiler_importer, True if config["text_features"] else False) code_text = profiler_obj.get_code() # import statement will be generated when profiler_obj.get_code is called. # need to copy data profiler from AION code as code is splitted and merging code amnnually # can add bugs. need a better way to find the imported module #aion_transformer = Path(__file__).parent.parent.parent.parent/'transformations' aion_utilities = Path(__file__).parent.parent.parent.parent/'utilities' #added for non encryption --Usnish (deploy_path/'transformations').mkdir(parents=True, exist_ok=True) if not (aion_utilities/'transformations'/'dataProfiler.py').exists(): raise ValueError('Data profiler file removed from AION') shutil.copy(aion_utilities/'transformations'/'dataProfiler.py',deploy_path/"dataProfiler.py") shutil.copy(aion_utilities/'transformations'/'data_profiler_functions.py',deploy_path/"transformations"/"data_profiler_functions.py") if (deploy_path/'text').exists(): shutil.rmtree(deploy_path/'text') with tarfile.open(aion_utilities/'text.tar') as file: file.extractall(deploy_path) if (deploy_path/'utils').exists(): shutil.rmtree(deploy_path/'utils') with tarfile.open(aion_utilities / 'utils.tar') as file: file.extractall(deploy_path) generated_files.append("dataProfiler.py") generated_files.append("transformations") generated_files.append("text") generated_files.append("utils") code = file_header(usecase) code += "\nimport os\nos.path.abspath(os.path.join(__file__, os.pardir))\n" #chdir to import from current dir code += importer.getCode() code += '\nwarnings.filterwarnings("ignore")\n' code += transformer.getInputOutputFiles() code += function.getCode() transformer.addLocalFunctionsCode() code += transformer.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), profiler_importer]) if config["text_features"]: req += add_text_dependency() f.write(req) generated_files.append("requirements.txt") config_file = deploy_path/"config.json" config_data = get_transformer_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") create_docker_file('transformer', deploy_path,config['modelName'], generated_files,True if config["text_features"] else False)
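run_transformer() writes the split-and-profile step as addStatement() strings, which makes the classification path easy to misread. A compact approximation of the code it generates for classification is given below; `profiler` stands for the class copied in from transformations/dataProfiler.py, and the constructor arguments are taken from the generated statements above.

# Approximation of the transformation step generated for classification problems:
# a stratified train/test split followed by profiling of the training partition.
from sklearn.model_selection import train_test_split

def split_and_profile(df, target_feature, config, log, profiler):
    train_data, test_data = train_test_split(
        df, stratify=df[target_feature], test_size=config['test_ratio'])
    profilerObj = profiler(xtrain=train_data, target=target_feature,
                           encode_target=True, config=config['profiler'], log=log)
    return profilerObj, train_data, test_data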
register.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.ml.core import * from .utility import * def get_register_params(config, models): param_keys = ["modelVersion","problem_type"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] data['models'] = models return data def run_register(config): importer = importModule() function = global_function() registration = register(importer) function.add_function('get_mlflow_uris') models = get_variable('models_name') smaller_is_better = get_variable('smaller_is_better', False) registration.addClassCode(smaller_is_better) registration.addLocalFunctionsCode(models) registration.addPrefixCode() registration.addMainCode(models) importModules(importer, registration.getMainCodeModules()) importer.addModule('warnings') generated_files = [] usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('register') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file required for creating a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = registration.getImportCode() code += '\nwarnings.filterwarnings("ignore")\n' code += registration.getInputOutputFiles() code += function.getCode() code += registration.getCode() # create serving file with open(deploy_path/"aionCode.py", 'w') as f: f.write(file_header(usecase) + code) generated_files.append("aionCode.py") # create requirements file req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") # create config file with open (deploy_path/"config.json", "w") as f: json.dump(get_register_params(config, models), f, indent=4) generated_files.append("config.json") # create docker file create_docker_file('register', deploy_path,config['modelName'], generated_files)
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .load_data import run_loader from .transformer import run_transformer from .selector import run_selector from .trainer import run_trainer from .register import run_register from .deploy import run_deploy from .drift_analysis import run_drift_analysis
load_data.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json import platform from mlac.ml.core import * from .utility import * imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'argparse', 'mod_from': None, 'mod_as': None}, {'module': 'platform', 'mod_from': None, 'mod_as': None } ] def get_load_data_params(config): param_keys = ["modelVersion","problem_type","target_feature","selected_features"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_loader(config): generated_files = [] importer = importModule() loader = tabularDataReader() importModules(importer, imported_modules) usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('load_data') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create the production data reader file importer.addLocalModule('dataReader', mod_from='data_reader') readers = ['sqlite','influx'] if 's3' in config.keys(): readers.append('s3') reader_obj = data_reader(readers) with open(deploy_path/"data_reader.py", 'w') as f: f.write(file_header(usecase) + reader_obj.get_code()) generated_files.append("data_reader.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = file_header(usecase) code += importer.getCode() code += loader.getInputOutputFiles() code += loader.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") config_file = deploy_path/"config.json" config_data = get_load_data_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") create_docker_file('load_data', deploy_path,config['modelName'],generated_files)
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """
input_drift.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ class input_drift(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.codeText = '' def addInputDriftClass(self): text = "\ \nclass inputdrift():\ \n\ \n def __init__(self,base_config):\ \n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\ \n self.currentDataLocation = base_config['currentDataLocation']\ \n home = Path.home()\ \n if platform.system() == 'Windows':\ \n from pathlib import WindowsPath\ \n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\ \n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\ \n else:\ \n from pathlib import PosixPath\ \n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\ \n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\ \n if not output_model_dir.exists():\ \n raise ValueError(f'Configuration file not found at {output_model_dir}')\ \n\ \n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\ \n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\ \n mlflow.set_tracking_uri(tracking_uri)\ \n mlflow.set_registry_uri(registry_uri)\ \n client = mlflow.tracking.MlflowClient(\ \n tracking_uri=tracking_uri,\ \n registry_uri=registry_uri,\ \n )\ \n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\ \n model = mlflow.pyfunc.load_model(model_version_uri)\ \n run = client.get_run(model.metadata.run_id)\ \n if run.info.artifact_uri.startswith('file:'):\ \n artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\ \n else:\ \n artifact_path = Path(run.info.artifact_uri)\ \n self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\ \n\ \n def get_input_drift(self,current_data, historical_data):\ \n curr_num_feat = current_data.select_dtypes(include='number')\ \n hist_num_feat = historical_data.select_dtypes(include='number')\ \n num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\ \n alert_count = 0\ \n data = {\ \n 'current':{'data':current_data},\ \n 'hist': {'data': historical_data}\ \n }\ \n dist_changed_columns = []\ \n dist_change_message = []\ \n for feature in num_features:\ \n curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\ \n if (curr_static_value < 0.05):\ \n distribution = {}\ \n distribution['hist'] = self.DistributionFinder( historical_data[feature])\ \n distribution['curr'] = self.DistributionFinder( current_data[feature])\ \n if(distribution['hist']['name'] == distribution['curr']['name']):\ \n pass\ \n else:\ \n alert_count = alert_count + 1\ \n dist_changed_columns.append(feature)\ \n changed_column = {}\ \n changed_column['Feature'] = feature\ \n changed_column['KS_Training'] = curr_static_value\ \n changed_column['Training_Distribution'] = distribution['hist']['name']\ \n changed_column['New_Distribution'] = distribution['curr']['name']\ \n dist_change_message.append(changed_column)\ \n if alert_count:\ \n resultStatus = dist_change_message\ 
\n else :\ \n resultStatus='Model is working as expected'\ \n return(alert_count, resultStatus)\ \n\ \n def DistributionFinder(self,data):\ \n best_distribution =''\ \n best_sse =0.0\ \n if(data.dtype in ['int','int64']):\ \n distributions= {'bernoulli':{'algo':st.bernoulli},\ \n 'binom':{'algo':st.binom},\ \n 'geom':{'algo':st.geom},\ \n 'nbinom':{'algo':st.nbinom},\ \n 'poisson':{'algo':st.poisson}\ \n }\ \n index, counts = np.unique(data.astype(int),return_counts=True)\ \n if(len(index)>=2):\ \n best_sse = np.inf\ \n y1=[]\ \n total=sum(counts)\ \n mean=float(sum(index*counts))/total\ \n variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\ \n dispersion=mean/float(variance)\ \n theta=1/float(dispersion)\ \n r=mean*(float(theta)/1-theta)\ \n\ \n for j in counts:\ \n y1.append(float(j)/total)\ \n distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\ \n distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\ \n distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\ \n distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\ \n distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\ \n\ \n sselist = []\ \n for dist in distributions.keys():\ \n distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\ \n if np.isnan(distributions[dist]['sess']):\ \n distributions[dist]['sess'] = float('inf')\ \n best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\ \n best_distribution = best_dist\ \n best_sse = distributions[best_dist]['sess']\ \n\ \n elif (len(index) == 1):\ \n best_distribution = 'Constant Data-No Distribution'\ \n best_sse = 0.0\ \n elif(data.dtype in ['float64','float32']):\ \n distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\ \n best_distribution = st.norm.name\ \n best_sse = np.inf\ \n nrange = data.max() - data.min()\ \n\ \n y, x = np.histogram(data.astype(float), bins='auto', density=True)\ \n x = (x + np.roll(x, -1))[:-1] / 2.0\ \n\ \n for distribution in distributions:\ \n with warnings.catch_warnings():\ \n warnings.filterwarnings('ignore')\ \n params = distribution.fit(data.astype(float))\ \n arg = params[:-2]\ \n loc = params[-2]\ \n scale = params[-1]\ \n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\ \n sse = np.sum(np.power(y - pdf, 2.0))\ \n if( sse < best_sse):\ \n best_distribution = distribution.name\ \n best_sse = sse\ \n\ \n return {'name':best_distribution, 'sse': best_sse}\ \n\ " return text def addSuffixCode(self, indent=1): text ="\n\ \ndef check_drift( config):\ \n inputdriftObj = inputdrift(config)\ \n historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\ \n currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\ \n dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\ \n if message == 'Model is working as expected':\ \n output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\ \n else:\ \n output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\ \n return(output_json)\ \n\ \nif __name__ == '__main__':\ \n try:\ \n if len(sys.argv) < 2:\ \n raise ValueError('config file not present')\ \n config = sys.argv[1]\ \n if Path(config).is_file() and Path(config).suffix == '.json':\ \n with open(config, 'r') as f:\ \n config = json.load(f)\ \n else:\ \n config = 
json.loads(config)\ \n output = check_drift(config)\ \n status = {'Status':'Success','Message':output}\ \n print('input_drift:'+json.dumps(status))\ \n except Exception as e:\ \n status = {'Status':'Failure','Message':str(e)}\ \n print('input_drift:'+json.dumps(status))" return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def generateCode(self): self.codeText += self.addInputDriftClass() self.codeText += self.addSuffixCode() def getCode(self): return self.codeText
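

# --- Illustrative usage (a minimal sketch, not part of the AION pipeline) ---
# The generator above emits only the body of the drift-detection script; the
# import header below is an assumption about what that emitted code needs at
# runtime (sys, json, platform, pathlib, pandas, numpy, scipy.stats, mlflow).
# The output file name 'aion_input_drift.py' is likewise hypothetical.
if __name__ == '__main__':
    from pathlib import Path

    header = (
        'import sys\n'
        'import json\n'
        'import platform\n'
        'import warnings\n'
        'from pathlib import Path\n'
        'import pandas as pd\n'
        'import numpy as np\n'
        'import scipy.stats as st\n'
        'import mlflow\n'
    )
    generator = input_drift()
    generator.generateCode()
    # concatenate the assumed import header with the generated class/function text
    Path('aion_input_drift.py').write_text(header + generator.getCode())
    print('generated', len(generator.getCode()), 'characters of drift-detection code')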
output_drift.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ class output_drift(): def __init__(self, missing=False, word2num_features = None, cat_encoder=False, target_encoder=False, normalizer=False, text_profiler=False, feature_reducer=False, score_smaller_is_better=True, problem_type='classification', tab_size=4): self.tab = ' ' * tab_size self.codeText = '' self.missing = missing self.word2num_features = word2num_features self.cat_encoder = cat_encoder self.target_encoder = target_encoder self.normalizer = normalizer self.text_profiler = text_profiler self.feature_reducer = feature_reducer self.score_smaller_is_better = score_smaller_is_better self.problem_type = problem_type def addDatabaseClass(self, indent=0): text = "\ \nclass database():\ \n def __init__(self, config):\ \n self.host = config['host']\ \n self.port = config['port']\ \n self.user = config['user']\ \n self.password = config['password']\ \n self.database = config['database']\ \n self.measurement = config['measurement']\ \n self.tags = config['tags']\ \n self.client = self.get_client()\ \n\ \n def read_data(self, query)->pd.DataFrame:\ \n cursor = self.client.query(query)\ \n points = cursor.get_points()\ \n my_list=list(points)\ \n df=pd.DataFrame(my_list)\ \n return df\ \n\ \n def get_client(self):\ \n client = InfluxDBClient(self.host,self.port,self.user,self.password)\ \n databases = client.get_list_database()\ \n databases = [x['name'] for x in databases]\ \n if self.database not in databases:\ \n client.create_database(self.database)\ \n return InfluxDBClient(self.host,self.port,self.user,self.password, self.database)\ \n\ \n def write_data(self,data):\ \n if isinstance(data, pd.DataFrame):\ \n sorted_col = data.columns.tolist()\ \n sorted_col.sort()\ \n data = data[sorted_col]\ \n data = data.to_dict(orient='records')\ \n for row in data:\ \n if 'time' in row.keys():\ \n p = '%Y-%m-%dT%H:%M:%S.%fZ'\ \n time_str = datetime.strptime(row['time'], p)\ \n del row['time']\ \n else:\ \n time_str = None\ \n if 'model_ver' in row.keys():\ \n self.tags['model_ver']= row['model_ver']\ \n del row['model_ver']\ \n json_body = [{\ \n 'measurement': self.measurement,\ \n 'time': time_str,\ \n 'tags': self.tags,\ \n 'fields': row\ \n }]\ \n self.client.write_points(json_body)\ \n\ \n def close(self):\ \n self.client.close()\ \n" if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def addPredictClass(self, indent=0): text = "\ \nclass predict():\ \n\ \n def __init__(self, base_config):\ \n self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\ \n self.dataLocation = base_config['dataLocation']\ \n self.db_enabled = base_config.get('db_enabled', False)\ \n if self.db_enabled:\ \n self.db_config = base_config['db_config']\ \n home = Path.home()\ \n if platform.system() == 'Windows':\ \n from pathlib import WindowsPath\ \n output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\ \n output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\ \n else:\ \n from pathlib import 
PosixPath\ \n output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\ \n output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\ \n if not output_model_dir.exists():\ \n raise ValueError(f'Configuration file not found at {output_model_dir}')\ \n\ \n tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\ \n registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\ \n mlflow.set_tracking_uri(tracking_uri)\ \n mlflow.set_registry_uri(registry_uri)\ \n client = mlflow.tracking.MlflowClient(\ \n tracking_uri=tracking_uri,\ \n registry_uri=registry_uri,\ \n )\ \n self.model_version = client.get_latest_versions(self.usecase, stages=['production'] )[0].version\ \n model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\ \n self.model = mlflow.pyfunc.load_model(model_version_uri)\ \n run = client.get_run(self.model.metadata.run_id)\ \n if run.info.artifact_uri.startswith('file:'): #remove file:///\ \n self.artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\ \n else:\ \n self.artifact_path = Path(run.info.artifact_uri)\ \n with open(self.artifact_path/'deploy.json', 'r') as f:\ \n deployment_dict = json.load(f)\ \n with open(self.artifact_path/'features.txt', 'r') as f:\ \n self.train_features = f.readline().rstrip().split(',')\ \n\ \n self.dataLocation = base_config['dataLocation']\ \n self.selected_features = deployment_dict['load_data']['selected_features']\ \n self.target_feature = deployment_dict['load_data']['target_feature']\ \n self.output_model_dir = output_model_dir" if self.missing: text += "\n self.missing_values = deployment_dict['transformation']['fillna']" if self.word2num_features: text += "\n self.word2num_features = deployment_dict['transformation']['word2num_features']" if self.cat_encoder == 'labelencoding': text += "\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']" elif (self.cat_encoder == 'targetencoding') or (self.cat_encoder == 'onehotencoding'): text += "\n self.cat_encoder = deployment_dict['transformation']['cat_encoder']['file']" text += "\n self.cat_encoder_cols = deployment_dict['transformation']['cat_encoder']['features']" if self.target_encoder: text += "\n self.target_encoder = joblib.load(self.artifact_path/deployment_dict['transformation']['target_encoder'])" if self.normalizer: text += "\n self.normalizer = joblib.load(self.artifact_path/deployment_dict['transformation']['normalizer']['file'])\ \n self.normalizer_col = deployment_dict['transformation']['normalizer']['features']" if self.text_profiler: text += "\n self.text_profiler = joblib.load(self.artifact_path/deployment_dict['transformation']['Status']['text_profiler']['file'])\ \n self.text_profiler_col = deployment_dict['transformation']['Status']['text_profiler']['features']" if self.feature_reducer: text += "\n self.feature_reducer = joblib.load(self.artifact_path/deployment_dict['featureengineering']['feature_reducer']['file'])\ \n self.feature_reducer_cols = deployment_dict['featureengineering']['feature_reducer']['features']" text += """ def read_data_from_db(self): if self.db_enabled: try: db = database(self.db_config) query = "SELECT * FROM {} WHERE model_ver = '{}' AND {} != ''".format(db.measurement, self.model_version, self.target_feature) if 'read_time' in self.db_config.keys() and self.db_config['read_time']: query += f" time > now() - {self.db_config['read_time']}" data = db.read_data(query) except: raise ValueError('Unable to read from the database') finally: if db: db.close() return data 
return None""" text += "\ \n def predict(self, data):\ \n df = pd.DataFrame()\ \n if Path(data).exists():\ \n if Path(data).suffix == '.tsv':\ \n df=read_data(data,encoding='utf-8',sep='\t')\ \n elif Path(data).suffix == '.csv':\ \n df=read_data(data,encoding='utf-8')\ \n else:\ \n if Path(data).suffix == '.json':\ \n jsonData = read_json(data)\ \n df = pd.json_normalize(jsonData)\ \n elif is_file_name_url(data):\ \n df = read_data(data,encoding='utf-8')\ \n else:\ \n jsonData = json.loads(data)\ \n df = pd.json_normalize(jsonData)\ \n if len(df) == 0:\ \n raise ValueError('No data record found')\ \n missing_features = [x for x in self.selected_features if x not in df.columns]\ \n if missing_features:\ \n raise ValueError(f'some feature/s is/are missing: {missing_features}')\ \n if self.target_feature not in df.columns:\ \n raise ValueError(f'Ground truth values/target column({self.target_feature}) not found in current data')\ \n df_copy = df.copy()\ \n df = df[self.selected_features]" if self.word2num_features: text += "\n for feat in self.word2num_features:" text += "\n df[ feat ] = df[feat].apply(lambda x: s2n(x))" if self.missing: text += "\n df.fillna(self.missing_values, inplace=True)" if self.cat_encoder == 'labelencoding': text += "\n df.replace(self.cat_encoder, inplace=True)" elif self.cat_encoder == 'targetencoding': text += "\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)" text += "\n df = cat_enc.transform(df)" elif self.cat_encoder == 'onehotencoding': text += "\n cat_enc = joblib.load(self.artifact_path/self.cat_encoder)" text += "\n transformed_data = cat_enc.transform(df[self.cat_encoder_cols]).toarray()" text += "\n df[cat_enc.get_feature_names()] = pd.DataFrame(transformed_data, columns=cat_enc.get_feature_names())[cat_enc.get_feature_names()]" if self.normalizer: text += "\n df[self.normalizer_col] = self.normalizer.transform(df[self.normalizer_col])" if self.text_profiler: text += "\n text_corpus = df[self.text_profiler_col].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\ \n df_vect=self.text_profiler.transform(text_corpus)\ \n if isinstance(df_vect, np.ndarray):\ \n df1 = pd.DataFrame(df_vect)\ \n else:\ \n df1 = pd.DataFrame(df_vect.toarray(),columns = self.text_profiler.named_steps['vectorizer'].get_feature_names())\ \n df1 = df1.add_suffix('_vect')\ \n df = pd.concat([df, df1],axis=1)" if self.feature_reducer: text += "\n df = self.feature_reducer.transform(df[self.feature_reducer_cols])" else: text += "\n df = df[self.train_features]" if self.target_encoder: text += "\n output = pd.DataFrame(self.model._model_impl.predict_proba(df), columns=self.target_encoder.classes_)\ \n df_copy['prediction'] = output.idxmax(axis=1)" else: text += "\n output = self.model.predict(df).reshape(1, -1)[0].round(2)\ \n df_copy['prediction'] = output" text += "\n return df_copy" if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def getClassificationMatrixCode(self, indent=0): text = "\ \ndef get_classification_metrices(actual_values, predicted_values):\ \n result = {}\ \n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\ \n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\ \n average='macro')\ \n\ \n result['accuracy'] = accuracy_score\ \n result['precision'] = avg_precision\ \n 
result['recall'] = avg_recall\ \n result['f1'] = avg_f1\ \n return result\ \n\ " if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def getRegrssionMatrixCode(self, indent=0): text = "\ \ndef get_regression_metrices( actual_values, predicted_values):\ \n result = {}\ \n\ \n me = np.mean(predicted_values - actual_values)\ \n sde = np.std(predicted_values - actual_values, ddof = 1)\ \n\ \n abs_err = np.abs(predicted_values - actual_values)\ \n mae = np.mean(abs_err)\ \n sdae = np.std(abs_err, ddof = 1)\ \n\ \n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\ \n mape = np.mean(abs_perc_err)\ \n sdape = np.std(abs_perc_err, ddof = 1)\ \n\ \n result['mean_error'] = me\ \n result['mean_abs_error'] = mae\ \n result['mean_abs_perc_error'] = mape\ \n result['error_std'] = sde\ \n result['abs_error_std'] = sdae\ \n result['abs_perc_error_std'] = sdape\ \n return result\ \n\ " if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def addSuffixCode(self, indent=1): text ="\n\ \ndef check_drift( config):\ \n prediction = predict(config)\ \n usecase = config['modelName'] + '_' + config['modelVersion']\ \n train_data_path = prediction.artifact_path/(usecase+'_data.csv')\ \n if not train_data_path.exists():\ \n raise ValueError(f'Training data not found at {train_data_path}')\ \n curr_with_pred = prediction.read_data_from_db()\ \n if prediction.target_feature not in curr_with_pred.columns:\ \n raise ValueError('Ground truth not updated for corresponding data in database')\ \n train_with_pred = prediction.predict(train_data_path)\ \n performance = {}" if self.problem_type == 'classification': text += "\n\ \n performance['train'] = get_classification_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\ \n performance['current'] = get_classification_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])" else: text += "\n\ \n performance['train'] = get_regression_metrices(train_with_pred[prediction.target_feature], train_with_pred['prediction'])\ \n performance['current'] = get_regression_metrices(curr_with_pred[prediction.target_feature], curr_with_pred['prediction'])" text += "\n return performance" text += "\n\ \nif __name__ == '__main__':\ \n try:\ \n if len(sys.argv) < 2:\ \n raise ValueError('config file not present')\ \n config = sys.argv[1]\ \n if Path(config).is_file() and Path(config).suffix == '.json':\ \n with open(config, 'r') as f:\ \n config = json.load(f)\ \n else:\ \n config = json.loads(config)\ \n output = check_drift(config)\ \n status = {'Status':'Success','Message':json.loads(output)}\ \n print('output_drift:'+json.dumps(status))\ \n except Exception as e:\ \n status = {'Status':'Failure','Message':str(e)}\ \n print('output_drift:'+json.dumps(status))" if indent: text = text.replace('\n', (self.tab * indent) + '\n') return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def generateCode(self): self.codeText += self.addDatabaseClass() self.codeText += self.addPredictClass() if self.problem_type == 'classification': self.codeText += self.getClassificationMatrixCode() elif self.problem_type == 'regression': self.codeText += self.getRegrssionMatrixCode() else: raise ValueError(f"Unsupported problem type: {self.problem_type}") self.codeText += self.addSuffixCode() def getCode(self): return self.codeText
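

# --- Illustrative usage (a minimal sketch, not the authoritative pipeline) ---
# Configure the generator to mirror the preprocessing applied during training;
# the flags chosen here (missing-value imputation, label encoding, a
# normalizer) are assumptions for illustration only. The real caller is
# expected to prepend the imports the emitted code relies on (mlflow,
# influxdb, sklearn, scipy.stats, pandas, numpy, joblib, ...).
if __name__ == '__main__':
    generator = output_drift(
        missing=True,
        cat_encoder='labelencoding',
        normalizer=True,
        problem_type='classification',
    )
    generator.generateCode()
    script_body = generator.getCode()
    print('generated', len(script_body), 'characters of output-drift code')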
deploy.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class deploy(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.codeText = "" self.input_files = {} self.output_files = {} self.addInputFiles({'metaData' : 'modelMetaData.json','log':'predict.log'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() text += '\n' text += self.getOutputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def addStatement(self, statement, indent=1): pass def getPredictionCodeModules(self): modules = [{'module':'json'} ,{'module':'joblib'} ,{'module':'pandas', 'mod_as':'pd'} ,{'module':'numpy', 'mod_as':'np'} ,{'module':'Path', 'mod_from':'pathlib'} ,{'module':'json_normalize', 'mod_from':'pandas'} ,{'module':'load_model', 'mod_from':'tensorflow.keras.models'} ] return modules def addPredictionCode(self): self.codeText += """ class deploy(): def __init__(self, base_config, log=None): self.targetPath = (Path('aion') / base_config['targetPath']).resolve() if log: self.logger = log else: log_file = self.targetPath / IOFiles['log'] self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: self.initialize(base_config) except Exception as e: self.logger.error(e, exc_info=True) def initialize(self, base_config): targetPath = Path('aion') / base_config['targetPath'] meta_data_file = targetPath / IOFiles['metaData'] if meta_data_file.exists(): meta_data = utils.read_json(meta_data_file) self.dateTimeFeature = meta_data['training']['dateTimeFeature'] self.targetFeature = meta_data['training']['target_feature'] normalization_file = meta_data['transformation']['Status']['Normalization_file'] self.normalizer = joblib.load(normalization_file) self.lag_order = base_config['lag_order'] self.noofforecasts = base_config['noofforecasts'] run_id = str(meta_data['register']['runId']) model_path = str(targetPath/'runs'/str(meta_data['register']['runId'])/meta_data['register']['model']/'model') self.model = load_model(model_path) self.model_name = meta_data['register']['model'] def predict(self, data=None): try: return self.__predict(data) except Exception as e: if self.logger: self.logger.error(e, exc_info=True) raise ValueError(json.dumps({'Status': 'Failure', 'Message': str(e)})) def __predict(self, data=None): jsonData = json.loads(data) dataFrame = json_normalize(jsonData) xtrain = dataFrame if 
len(dataFrame) == 0: raise ValueError('No data record found') df_l = len(dataFrame) pred_threshold = 0.1 max_pred_by_user = round((df_l) * pred_threshold) # prediction for 24 steps or next 24 hours if self.noofforecasts == -1: self.noofforecasts = max_pred_by_user no_of_prediction = self.noofforecasts if (str(no_of_prediction) > str(max_pred_by_user)): no_of_prediction = max_pred_by_user noofforecasts = no_of_prediction # self.sfeatures.remove(self.datetimeFeature) features = self.targetFeature if len(features) == 1: xt = xtrain[features].values else: xt = xtrain[features].values xt = xt.astype('float32') xt = self.normalizer.transform(xt) pred_data = xt y_future = [] self.lag_order = int(self.lag_order) for i in range(int(no_of_prediction)): pdata = pred_data[-self.lag_order:] if len(features) == 1: pdata = pdata.reshape((1, self.lag_order)) else: pdata = pdata.reshape((1, self.lag_order, len(features))) if (len(features) > 1): pred = self.model.predict(pdata) predout = self.normalizer.inverse_transform(pred) y_future.append(predout) pred_data = np.append(pred_data, pred, axis=0) else: pred = self.model.predict(pdata) predout = self.normalizer.inverse_transform(pred) y_future.append(predout.flatten()[-1]) pred_data = np.append(pred_data, pred) pred = pd.DataFrame(index=range(0, len(y_future)), columns=self.targetFeature) for i in range(0, len(y_future)): pred.iloc[i] = y_future[i] predictions = pred forecast_output = predictions.to_json(orient='records') return forecast_output """ def getCode(self): return self.codeText def getServiceCode(self): return """ from http.server import BaseHTTPRequestHandler,HTTPServer from socketserver import ThreadingMixIn import os from os.path import expanduser import platform import threading import subprocess import argparse import re import cgi import json import shutil import logging import sys import time import seaborn as sns from pathlib import Path from predict import deploy import pandas as pd import scipy.stats as st import numpy as np import warnings from utility import * warnings.filterwarnings("ignore") config_input = None IOFiles = { "inputData": "rawData.dat", "metaData": "modelMetaData.json", "production": "production.json", "log": "aion.log", "monitoring":"monitoring.json", "prodData": "prodData", "prodDataGT":"prodDataGT" } def DistributionFinder(data): try: distributionName = "" sse = 0.0 KStestStatic = 0.0 dataType = "" if (data.dtype == "float64" or data.dtype == "float32"): dataType = "Continuous" elif (data.dtype == "int"): dataType = "Discrete" elif (data.dtype == "int64"): dataType = "Discrete" if (dataType == "Discrete"): distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson] index, counts = np.unique(data.astype(int), return_counts=True) if (len(index) >= 2): best_sse = np.inf y1 = [] total = sum(counts) mean = float(sum(index * counts)) / total variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1) dispersion = mean / float(variance) theta = 1 / float(dispersion) r = mean * (float(theta) / 1 - theta) for j in counts: y1.append(float(j) / total) pmf1 = st.bernoulli.pmf(index, mean) pmf2 = st.binom.pmf(index, len(index), p=mean / len(index)) pmf3 = st.geom.pmf(index, 1 / float(1 + mean)) pmf4 = st.nbinom.pmf(index, mean, r) pmf5 = st.poisson.pmf(index, mean) sse1 = np.sum(np.power(y1 - pmf1, 2.0)) sse2 = np.sum(np.power(y1 - pmf2, 2.0)) sse3 = np.sum(np.power(y1 - pmf3, 2.0)) sse4 = np.sum(np.power(y1 - pmf4, 2.0)) sse5 = np.sum(np.power(y1 - pmf5, 2.0)) sselist = [sse1, sse2, sse3, sse4, 
sse5] best_distribution = 'NA' for i in range(0, len(sselist)): if best_sse > sselist[i] > 0: best_distribution = distributions[i].name best_sse = sselist[i] elif (len(index) == 1): best_distribution = "Constant Data-No Distribution" best_sse = 0.0 distributionName = best_distribution sse = best_sse elif (dataType == "Continuous"): distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t, st.gamma, st.beta] best_distribution = st.norm.name best_sse = np.inf datamin = data.min() datamax = data.max() nrange = datamax - datamin y, x = np.histogram(data.astype(float), bins='auto', density=True) x = (x + np.roll(x, -1))[:-1] / 2.0 for distribution in distributions: params = distribution.fit(data.astype(float)) arg = params[:-2] loc = params[-2] scale = params[-1] pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) sse = np.sum(np.power(y - pdf, 2.0)) if (best_sse > sse > 0): best_distribution = distribution.name best_sse = sse distributionName = best_distribution sse = best_sse except: response = str(sys.exc_info()[0]) message = 'Job has Failed' + response exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) print(message) return distributionName, sse def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()): import matplotlib.pyplot as plt import math import io, base64, urllib np.seterr(divide='ignore', invalid='ignore') try: plt.clf() except: pass plt.rcParams.update({'figure.max_open_warning': 0}) sns.set(color_codes=True) pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] if len(feature) > 4: numneroffeatures = len(feature) plt.figure(figsize=(10, numneroffeatures*2)) else: plt.figure(figsize=(10,5)) for i in enumerate(feature): dataType = dataframe[i[1]].dtypes if dataType not in pandasNumericDtypes: dataframe[i[1]] = pd.Categorical(dataframe[i[1]]) dataframe[i[1]] = dataframe[i[1]].cat.codes dataframe[i[1]] = dataframe[i[1]].astype(int) dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0]) else: dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean()) plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1) plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1) distname, sse = DistributionFinder(dataframe[i[1]]) print(distname) ax = sns.distplot(dataframe[i[1]], label=distname) ax.legend(loc='best') if newdataframe.empty == False: dataType = newdataframe[i[1]].dtypes if dataType not in pandasNumericDtypes: newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]]) newdataframe[i[1]] = newdataframe[i[1]].cat.codes newdataframe[i[1]] = newdataframe[i[1]].astype(int) newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0]) else: newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean()) distname, sse = DistributionFinder(newdataframe[i[1]]) print(distname) ax = sns.distplot(newdataframe[i[1]],label=distname) ax.legend(loc='best') buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) string = base64.b64encode(buf.read()) uri = urllib.parse.quote(string) return uri def read_json(file_path): data = None with open(file_path,'r') as f: data = json.load(f) return data class HTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): print('PYTHON ######## REQUEST ####### STARTED') if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): ctype, pdict = 
cgi.parse_header(self.headers.get('content-type')) if ctype == 'application/json': length = int(self.headers.get('content-length')) data = self.rfile.read(length) usecase = self.path.split('/')[-2] if usecase.lower() == config_input['targetPath'].lower(): operation = self.path.split('/')[-1] data = json.loads(data) dataStr = json.dumps(data) if operation.lower() == 'predict': output=deployobj.predict(dataStr) resp = output elif operation.lower() == 'groundtruth': gtObj = groundtruth(config_input) output = gtObj.actual(dataStr) resp = output elif operation.lower() == 'delete': targetPath = Path('aion')/config_input['targetPath'] for file in data: x = targetPath/file if x.exists(): os.remove(x) resp = json.dumps({'Status':'Success'}) else: outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'}) resp = outputStr else: outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'}) resp = outputStr else: outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'}) resp = outputStr resp=resp+'\\n' resp=resp.encode() self.send_response(200) self.send_header('Content-Type', 'application/json') self.end_headers() self.wfile.write(resp) else: print('python ==> else1') self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() print('PYTHON ######## REQUEST ####### ENDED') return def do_GET(self): print('PYTHON ######## REQUEST ####### STARTED') if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path): usecase = self.path.split('/')[-2] self.send_response(200) self.targetPath = Path('aion')/config_input['targetPath'] meta_data_file = self.targetPath/IOFiles['metaData'] if meta_data_file.exists(): meta_data = read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {meta_data_file}') production_file = self.targetPath/IOFiles['production'] if production_file.exists(): production_data = read_json(production_file) else: raise ValueError(f'Production Details not found: {production_file}') operation = self.path.split('/')[-1] if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'): self.send_header('Content-Type', 'text/html') self.end_headers() ModelString = production_data['Model'] ModelPerformance = ModelString+'_performance.json' performance_file = self.targetPath/ModelPerformance if performance_file.exists(): performance_data = read_json(performance_file) else: raise ValueError(f'Production Details not found: {performance_data}') Scoring_Creteria = performance_data['scoring_criteria'] train_score = round(performance_data['metrices']['train_score'],2) test_score = round(performance_data['metrices']['test_score'],2) current_score = 'NA' monitoring = read_json(self.targetPath/IOFiles['monitoring']) reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config']) inputDatafile = self.targetPath/IOFiles['inputData'] NoOfPrediction = 0 NoOfGroundTruth = 0 inputdistribution = '' if reader.file_exists(IOFiles['prodData']): dfPredict = reader.read(IOFiles['prodData']) dfinput = pd.read_csv(inputDatafile) features = meta_data['training']['features'] inputdistribution = getDriftDistribution(features,dfinput,dfPredict) NoOfPrediction = len(dfPredict) if reader.file_exists(IOFiles['prodDataGT']): dfGroundTruth = reader.read(IOFiles['prodDataGT']) NoOfGroundTruth = len(dfGroundTruth) common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()] proddataDF = 
pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner') if Scoring_Creteria.lower() == 'accuracy': from sklearn.metrics import accuracy_score current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction']) current_score = round((current_score*100),2) elif Scoring_Creteria.lower() == 'recall': from sklearn.metrics import accuracy_score current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro') current_score = round((current_score*100),2) msg = \"""<html> <head> <title>Performance Details</title> </head> <style> table, th, td {border} </style> <body> <h2><b>Deployed Model:</b>{ModelString}</h2> <br/> <table style="width:50%"> <tr> <td>No of Prediction</td> <td>{NoOfPrediction}</td> </tr> <tr> <td>No of GroundTruth</td> <td>{NoOfGroundTruth}</td> </tr> </table> <br/> <table style="width:100%"> <tr> <th>Score Type</th> <th>Train Score</th> <th>Test Score</th> <th>Production Score</th> </tr> <tr> <td>{Scoring_Creteria}</td> <td>{train_score}</td> <td>{test_score}</td> <td>{current_score}</td> </tr> </table> <br/> <br/> <img src="data:image/png;base64,{newDataDrift}" alt="" > </body> </html> \""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution) elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'): self.send_header('Content-Type', 'text/plain') self.end_headers() log_file = self.targetPath/IOFiles['log'] if log_file.exists(): with open(log_file) as f: msg = f.read() f.close() else: raise ValueError(f'Log Details not found: {log_file}') else: self.send_header('Content-Type', 'application/json') self.end_headers() features = meta_data['load_data']['selected_features'] bodydes='[' for x in features: if bodydes != '[': bodydes = bodydes+',' bodydes = bodydes+'{"'+x+'":"value"}' bodydes+=']' urltext = '/AION/'+config_input['targetPath']+'/predict' urltextgth='/AION/'+config_input['targetPath']+'/groundtruth' urltextproduction='/AION/'+config_input['targetPath']+'/metrices' msg=\""" Version:{modelversion} RunNo: {runNo} URL for Prediction ================== URL:{url} RequestType: POST Content-Type=application/json Body: {displaymsg} Output: prediction,probability(if Applicable),remarks corresponding to each row. URL for GroundTruth =================== URL:{urltextgth} RequestType: POST Content-Type=application/json Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. 
Otherwise outputdrift will not work URL for Model In Production Analysis ==================================== URL:{urltextproduction} RequestType: GET Content-Type=application/json \""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes) self.wfile.write(msg.encode()) else: self.send_response(403) self.send_header('Content-Type', 'application/json') self.end_headers() return class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): allow_reuse_address = True def shutdown(self): self.socket.close() HTTPServer.shutdown(self) class file_status(): def __init__(self, reload_function, params, file, logger): self.files_status = {} self.initializeFileStatus(file) self.reload_function = reload_function self.params = params self.logger = logger def initializeFileStatus(self, file): self.files_status = {'path': file, 'time':file.stat().st_mtime} def is_file_changed(self): if self.files_status['path'].stat().st_mtime > self.files_status['time']: self.files_status['time'] = self.files_status['path'].stat().st_mtime return True return False def run(self): global config_input while( True): time.sleep(30) if self.is_file_changed(): production_details = targetPath/IOFiles['production'] if not production_details.exists(): raise ValueError(f'Model in production details does not exist') productionmodel = read_json(production_details) config_file = Path(__file__).parent/'config.json' if not Path(config_file).exists(): raise ValueError(f'Config file is missing: {config_file}') config_input = read_json(config_file) config_input['deployedModel'] = productionmodel['Model'] config_input['deployedRunNo'] = productionmodel['runNo'] self.logger.info('Model changed Reloading.....') self.logger.info(f'Model: {config_input["deployedModel"]}') self.logger.info(f'Version: {str(config_input["modelVersion"])}') self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}') self.reload_function(config_input) class SimpleHttpServer(): def __init__(self, ip, port, model_file_path,reload_function,params, logger): self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler) self.status_checker = file_status( reload_function, params, model_file_path, logger) def start(self): self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.daemon = True self.server_thread.start() self.status_thread = threading.Thread(target=self.status_checker.run) self.status_thread.start() def waitForThread(self): self.server_thread.join() self.status_thread.join() def stop(self): self.server.shutdown() self.waitForThread() if __name__=='__main__': parser = argparse.ArgumentParser(description='HTTP Server') parser.add_argument('-ip','--ipAddress', help='HTTP Server IP') parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server') args = parser.parse_args() config_file = Path(__file__).parent/'config.json' if not Path(config_file).exists(): raise ValueError(f'Config file is missing: {config_file}') config = read_json(config_file) if args.ipAddress: config['ipAddress'] = args.ipAddress if args.portNo: config['portNo'] = args.portNo targetPath = Path('aion')/config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') production_details = targetPath/IOFiles['production'] if not production_details.exists(): raise ValueError(f'Model in production details does not exist') productionmodel = read_json(production_details) config['deployedModel'] = 
productionmodel['Model'] config['deployedRunNo'] = productionmodel['runNo'] #server = SimpleHttpServer(config['ipAddress'],int(config['portNo'])) config_input = config logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S') logger = logging.getLogger(Path(__file__).parent.name) deployobj = deploy(config_input, logger) server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger) logger.info('HTTP Server Running...........') logger.info(f"IP Address: {config['ipAddress']}") logger.info(f"Port No.: {config['portNo']}") print('HTTP Server Running...........') print('For Prediction') print('================') print('Request Type: Post') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/predict') print('\\nFor GroundTruth') print('================') print('Request Type: Post') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/groundtruth') print('\\nFor Help') print('================') print('Request Type: Get') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/help') print('\\nFor Model In Production Analysis') print('================') print('Request Type: Get') print('Content-Type: application/json') print('URL: /AION/'+config['targetPath']+'/metrices') server.start() server.waitForThread() """
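

# --- Illustrative usage (a minimal sketch, assumptions noted inline) ---
# This generator produces two artefacts: the prediction class body
# (addPredictionCode/getCode) and the HTTP service wrapper (getServiceCode).
# The surrounding AION tooling decides the actual target file names; the
# preview below only builds the text in memory.
if __name__ == '__main__':
    generator = deploy()
    generator.addPredictionCode()
    # modules the emitted prediction code is declared to import
    needed = [m['module'] for m in generator.getPredictionCodeModules()]
    print('modules required by the generated prediction code:', needed)
    print(generator.getInputOutputFiles())
    prediction_code = generator.getCode()      # body of the generated predict module
    service_code = generator.getServiceCode()  # HTTP service wrapper text
    print(len(prediction_code), 'and', len(service_code), 'characters generated')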
trainer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class learner(): def __init__(self, problem_type="classification", target_feature="", sample_method=None,indent=0, tab_size=4): self.tab = " "*tab_size self.df_name = 'df' self.problem_type = problem_type self.target_feature = target_feature self.search_space = [] self.codeText = f"\ndef train(log):" self.input_files = {} self.output_files = {} self.function_code = '' self.addInputFiles({'inputData' : 'featureEngineeredData.dat', 'metaData' : 'modelMetaData.json','monitor':'monitoring.json'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = utils.read_json(config_file)\ \n return config" return text def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): return self.function_code + '\n' + self.codeText def addLocalFunctionsCode(self): self.function_code += self.__addValidateConfigCode() def getPrefixModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas', 'mod_as':'pd'} ] return modules def addPrefixCode(self, indent=1): self.codeText += "\ " def getSuffixModules(self): modules = [] return modules def addSuffixCode(self, indent=1): self.codeText += "\n\ " def getMainCodeModules(self): modules = [{'module':'logging'} ] return modules def getMlpCodeModules(self): modules = [{'module':'math'} ,{'module':'json'} ,{'module':'joblib'} ,{'module':'keras_tuner'} ,{'module':'pandas', 'mod_as':'pd'} ,{'module':'numpy', 'mod_as':'np'} ,{'module':'Path', 'mod_from':'pathlib'} ,{'module':'r2_score', 'mod_from':'sklearn.metrics'} ,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'} ,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'} ,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'} ,{'module':'Sequential', 'mod_from':'tensorflow.keras'} ,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'} ] return modules def addMlpCode(self): self.codeText = """ def getdlparams(config): for k, v in config.items(): if (k == "activation"): activation_fn = str(v) elif (k == 
"optimizer"): optimizer = str(v) elif (k == "loss"): loss_fn = str(v) elif (k == "first_layer"): if not isinstance(k, list): first_layer = str(v).split(',') else: first_layer = k elif (k == "lag_order"): lag_order = int(v) elif (k == "hidden_layers"): hidden_layers = int(v) elif (k == "dropout"): if not isinstance(k, list): dropout = str(v).split(',') else: dropout = k elif (k == "batch_size"): batch_size = int(v) elif (k == "epochs"): epochs = int(v) elif (k == "model_name"): model_name = str(v) return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs, model_name def numpydf(dataset, look_back): dataX, dataY = [], [] for i in range(len(dataset) - look_back - 1): subset = dataset[i:(i + look_back), 0] dataX.append(subset) dataY.append(dataset[i + look_back, 0]) return np.array(dataX), np.array(dataY) def startTraining(dataset,train_size,mlpConfig,filename_scaler,target_feature,scoreParam,log): log.info('Training started') activation_fn, optimizer, loss_fn, first_layer, hidden_layers, look_back, dropout, batch_size, epochs, model_name = getdlparams(mlpConfig) hp = keras_tuner.HyperParameters() first_layer_min = round(int(first_layer[0])) first_layer_max = round(int(first_layer[1])) dropout_min = float(dropout[0]) dropout_max = float(dropout[1]) dataset = dataset.values train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :] trainX, trainY = numpydf(train, look_back) testX, testY = numpydf(test, look_back) # create and fit Multilayer Perceptron model model = Sequential() model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), input_dim=look_back, activation=activation_fn)) #BUGID 13484 model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) #BUGID 13484 model.add(Dense(1, activation='sigmoid')) model.compile(loss=loss_fn, optimizer=optimizer) model_fit = model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, verbose=2) # Estimate model performance trainScore = model.evaluate(trainX, trainY, verbose=0) testScore = model.evaluate(testX, testY, verbose=0) # Scoring values for the model mse_eval = testScore rmse_eval = math.sqrt(testScore) # generate predictions for training trainPredict = model.predict(trainX) testPredict = model.predict(testX) scaler = joblib.load(filename_scaler) trainY = scaler.inverse_transform([trainY]) trainPredict = scaler.inverse_transform(trainPredict) ## For test data testY = scaler.inverse_transform([testY]) testPredict = scaler.inverse_transform(testPredict) mse_mlp = mean_squared_error(testY.T, testPredict) scores = {} r2 = round(r2_score(testY.T, testPredict), 2) scores['R2'] = r2 mae = round(mean_absolute_error(testY.T, testPredict), 2) scores['MAE'] = mae scores['MSE'] = round(mse_mlp, 2) rmse = round(math.sqrt(mse_mlp), 2) scores['RMSE'] = rmse scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE']) log.info("mlp rmse: "+str(rmse)) log.info("mlp mse: "+str(round(mse_mlp, 2))) log.info("mlp r2: "+str(r2)) log.info("mlp mae: "+str(mae)) return model, look_back, scaler,testScore,trainScore,scores def train(config, targetPath, log): dataLoc = targetPath / IOFiles['inputData'] if not dataLoc.exists(): return {'Status': 'Failure', 'Message': 'Data location does not exists.'} status = dict() usecase = config['targetPath'] df = utils.read_data(dataLoc) target_feature = config['target_feature'] dateTimeFeature= config['dateTimeFeature'] df.set_index(dateTimeFeature, inplace=True) train_size = 
int(len(df) * (1-config['test_ratio'])) #BugID:13217 mlpConfig = config['algorithms']['MLP'] filename = meta_data['transformation']['Status']['Normalization_file'] scoreParam = config['scoring_criteria'] log.info('Training MLP for TimeSeries') mlp_model, look_back, scaler,testScore,trainScore, error_matrix = startTraining(df,train_size,mlpConfig,filename,target_feature,scoreParam,log) score = error_matrix[scoreParam] # Training model model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name model_file_name = str(model_path/'model') mlp_model.save(model_file_name) meta_data['training'] = {} meta_data['training']['model_filename'] = model_file_name meta_data['training']['dateTimeFeature'] = dateTimeFeature meta_data['training']['target_feature'] = target_feature utils.write_json(meta_data, targetPath / IOFiles['metaData']) utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics']) # return status status = {'Status': 'Success', 'errorMatrix': error_matrix, 'test_score':testScore, 'train_score': trainScore,'score':error_matrix[scoreParam]} log.info(f'Test score: {testScore}') log.info(f'Train score: {trainScore}') log.info(f'output: {status}') return json.dumps(status) """ def getLstmCodeModules(self): modules = [{'module':'math'} ,{'module':'json'} ,{'module':'joblib'} ,{'module':'keras_tuner'} ,{'module':'pandas', 'mod_as':'pd'} ,{'module':'numpy', 'mod_as':'np'} ,{'module':'Path', 'mod_from':'pathlib'} ,{'module':'r2_score', 'mod_from':'sklearn.metrics'} ,{'module':'mean_squared_error', 'mod_from':'sklearn.metrics'} ,{'module':'mean_absolute_error', 'mod_from':'sklearn.metrics'} ,{'module':'Dense', 'mod_from':'tensorflow.keras.layers'} ,{'module':'Sequential', 'mod_from':'tensorflow.keras'} ,{'module':'Dropout', 'mod_from':'tensorflow.keras.layers'} ,{'module':'LSTM', 'mod_from':'tensorflow.keras.layers'} ,{'module':'TimeseriesGenerator', 'mod_from':'tensorflow.keras.preprocessing.sequence'} ,{'module':'train_test_split', 'mod_from':'sklearn.model_selection'} ] return modules def addLstmCode(self): self.codeText = """ def getdlparams(config): for k, v in config.items(): if (k == "activation"): activation_fn = str(v) elif (k == "optimizer"): optimizer = str(v) elif (k == "loss"): loss_fn = str(v) elif (k == "first_layer"): if not isinstance(k, list): first_layer = str(v).split(',') else: first_layer = k elif (k == "lag_order"): lag_order = int(v) elif (k == "hidden_layers"): hidden_layers = int(v) elif (k == "dropout"): if not isinstance(k, list): dropout = str(v).split(',') else: dropout = k elif (k == "batch_size"): batch_size = int(v) elif (k == "epochs"): epochs = int(v) return activation_fn, optimizer, loss_fn, first_layer, lag_order, hidden_layers, dropout, batch_size, epochs def numpydf(dataset, look_back): dataX, dataY = [], [] for i in range(len(dataset) - look_back - 1): subset = dataset[i:(i + look_back), 0] dataX.append(subset) dataY.append(dataset[i + look_back, 0]) return np.array(dataX), np.array(dataY) def startTraining(dataset,test_size,mlpConfig,filename_scaler,target_feature,scoreParam,log): log.info('Training started') activation_fn, optimizer, loss_fn, first_layer, look_back,hidden_layers, dropout, batch_size, epochs= getdlparams(mlpConfig) n_features = len(target_feature) n_input = look_back hp = keras_tuner.HyperParameters() first_layer_min = round(int(first_layer[0])) first_layer_max = round(int(first_layer[1])) dropout_min = float(dropout[0]) dropout_max = 
float(dropout[1]) dataset = dataset[target_feature] dataset_np = dataset.values train, test = train_test_split(dataset_np, test_size=test_size, shuffle=False) generatorTrain = TimeseriesGenerator(train, train, length=n_input, batch_size=8) generatorTest = TimeseriesGenerator(test, test, length=n_input, batch_size=8) batch_0 = generatorTrain[0] x, y = batch_0 epochs = int(epochs) ##Multivariate LSTM model model = Sequential() model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16), activation=activation_fn, input_shape=(n_input, n_features))) model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1))) model.add(Dense(n_features)) model.compile(optimizer=optimizer, loss=loss_fn) # model.fit(generatorTrain,epochs=epochs,batch_size=self.batch_size,shuffle=False) model.fit_generator(generatorTrain, steps_per_epoch=1, epochs=epochs, shuffle=False, verbose=0) # lstm_mv_testScore_mse = model.evaluate(x, y, verbose=0) predictions = [] future_pred_len = n_input # To get values for prediction,taking look_back steps of rows first_batch = train[-future_pred_len:] c_batch = first_batch.reshape((1, future_pred_len, n_features)) current_pred = None for i in range(len(test)): # get pred for firstbatch current_pred = model.predict(c_batch)[0] predictions.append(current_pred) # remove first val c_batch_rmv_first = c_batch[:, 1:, :] # update c_batch = np.append(c_batch_rmv_first, [[current_pred]], axis=1) ## Prediction, inverse the minmax transform scaler = joblib.load(filename_scaler) prediction_actual = scaler.inverse_transform(predictions) test_data_actual = scaler.inverse_transform(test) mse = None rmse = None ## Creating dataframe for actual,predictions pred_cols = list() for i in range(len(target_feature)): pred_cols.append(target_feature[i] + '_pred') predictions = pd.DataFrame(prediction_actual, columns=pred_cols) actual = pd.DataFrame(test_data_actual, columns=target_feature) actual.columns = [str(col) + '_actual' for col in dataset.columns] df_predicted = pd.concat([actual, predictions], axis=1) print("LSTM Multivariate prediction dataframe: \\n" + str(df_predicted)) # df_predicted.to_csv('mlp_prediction.csv') from math import sqrt from sklearn.metrics import mean_squared_error from sklearn.metrics import r2_score from sklearn.metrics import mean_absolute_error target = target_feature mse_dict = {} rmse_dict = {} mae_dict = {} r2_dict = {} lstm_var = 0 for name in target: index = dataset.columns.get_loc(name) mse = mean_squared_error(test_data_actual[:, index], prediction_actual[:, index]) mse_dict[name] = mse rmse = sqrt(mse) rmse_dict[name] = rmse lstm_var = lstm_var + rmse print("Name of the target feature: " + str(name)) print("RMSE of the target feature: " + str(rmse)) r2 = r2_score(test_data_actual[:, index], prediction_actual[:, index]) r2_dict[name] = r2 mae = mean_absolute_error(test_data_actual[:, index], prediction_actual[:, index]) mae_dict[name] = mae ## For VAR comparison, send last target mse and rmse from above dict lstm_var = lstm_var / len(target) select_msekey = list(mse_dict.keys())[-1] l_mse = list(mse_dict.values())[-1] select_rmsekey = list(rmse_dict.keys())[-1] l_rmse = list(rmse_dict.values())[-1] select_r2key = list(r2_dict.keys())[-1] l_r2 = list(r2_dict.values())[-1] select_maekey = list(mae_dict.keys())[-1] l_mae = list(mae_dict.values())[-1] log.info('Selected target feature of LSTM for best model selection: ' + str(select_rmsekey)) scores = {} scores['R2'] = l_r2 scores['MAE'] = l_mae 
scores['MSE'] = l_mse scores['RMSE'] = l_rmse scores[scoreParam] = scores.get(scoreParam.upper(), scores['MSE']) log.info("lstm rmse: "+str(l_rmse)) log.info("lstm mse: "+str(l_mse)) log.info("lstm r2: "+str(l_r2)) log.info("lstm mae: "+str(l_mae)) return model,look_back,scaler, scores def train(config, targetPath, log): dataLoc = targetPath / IOFiles['inputData'] if not dataLoc.exists(): return {'Status': 'Failure', 'Message': 'Data location does not exists.'} status = dict() usecase = config['targetPath'] df = utils.read_data(dataLoc) target_feature = config['target_feature'] dateTimeFeature= config['dateTimeFeature'] scoreParam = config['scoring_criteria'] testSize = config['test_ratio'] lstmConfig = config['algorithms']['LSTM'] filename = meta_data['transformation']['Status']['Normalization_file'] if (type(target_feature) is list): pass else: target_feature = list(target_feature.split(",")) df.set_index(dateTimeFeature, inplace=True) log.info('Training LSTM for TimeSeries') mlp_model, look_back, scaler, error_matrix = startTraining(df,testSize,lstmConfig,filename,target_feature,scoreParam,log) score = error_matrix[scoreParam] log.info("LSTM Multivariant all scoring param results: "+str(error_matrix)) # Training model model_path = targetPath/'runs'/str(meta_data['monitoring']['runId'])/model_name model_file_name = str(model_path/'model') mlp_model.save(model_file_name) meta_data['training'] = {} meta_data['training']['model_filename'] = model_file_name meta_data['training']['dateTimeFeature'] = dateTimeFeature meta_data['training']['target_feature'] = target_feature utils.write_json(meta_data, targetPath / IOFiles['metaData']) utils.write_json({'scoring_criteria': scoreParam, 'metrices': error_matrix,'score':error_matrix[scoreParam]}, model_path / IOFiles['metrics']) # return status status = {'Status': 'Success', 'errorMatrix': error_matrix,'score':error_matrix[scoreParam]} log.info(f'score: {error_matrix[scoreParam]}') log.info(f'output: {status}') return json.dumps(status) """ def addMainCode(self, indent=1): self.codeText += """ if __name__ == '__main__': config = validateConfig() targetPath = Path('aion') / config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') meta_data_file = targetPath / IOFiles['metaData'] if meta_data_file.exists(): meta_data = utils.read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {meta_data_file}') log_file = targetPath / IOFiles['log'] log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: print(train(config, targetPath, log)) except Exception as e: status = {'Status': 'Failure', 'Message': str(e)} print(json.dumps(status)) """ def add_variable(self, name, value, indent=1): if isinstance(value, str): self.codeText += f"\n{self.tab * indent}{name} = '{value}'" else: self.codeText += f"\n{self.tab * indent}{name} = {value}" def addStatement(self, statement, indent=1): self.codeText += f"\n{self.tab * indent}{statement}"
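

# --- Illustrative usage (a minimal sketch, not the full code-generation flow) ---
# Pick the MLP variant of the time-series trainer, attach the config validator
# and the __main__ block, then inspect the result. Choosing MLP over LSTM and
# the 'demand' target name are assumptions for illustration only; the real
# caller also prepends the imports listed by getMlpCodeModules() and the shared
# utility helpers before writing the file.
if __name__ == '__main__':
    generator = learner(target_feature='demand')
    generator.addLocalFunctionsCode()
    generator.addMlpCode()
    generator.addMainCode()
    print('imports expected by the emitted code:',
          [m['module'] for m in generator.getMlpCodeModules()])
    print(generator.getInputOutputFiles())
    print(len(generator.getCode()), 'characters of training code generated')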
selector.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class selector(): def __init__(self, indent=0, tab_size=4): self.tab = " "*tab_size self.codeText = "" self.pipe = 'pipe' self.code_generated = False self.input_files = {} self.output_files = {} self.function_code = '' self.addInputFiles({'inputData' : 'transformedData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','outputData' : 'featureEngineeredData.dat'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n return config" return text def addMainCode(self): self.codeText += """ if __name__ == '__main__': config = validateConfig() targetPath = Path('aion') / config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') meta_data_file = targetPath / IOFiles['metaData'] if meta_data_file.exists(): meta_data = read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {meta_data_file}') log_file = targetPath / IOFiles['log'] log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: print(featureSelector(config,targetPath, log)) except Exception as e: status = {'Status': 'Failure', 'Message': str(e)} print(json.dumps(status)) """ def addValidateConfigCode(self, indent=1): self.function_code += self.__addValidateConfigCode() def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): return self.function_code + '\n' + self.codeText def addLocalFunctionsCode(self): self.addValidateConfigCode() def getPrefixModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas', 'mod_as':'pd'} ] return modules def addPrefixCode(self, indent=1): self.codeText += """ def featureSelector(config, targetPath, log): dataLoc = targetPath / IOFiles['inputData'] if not dataLoc.exists(): return {'Status': 'Failure', 'Message': 'Data location does not exists.'} status = dict() df = pd.read_csv(dataLoc) log.log_dataframe(df) csv_path = str(targetPath / IOFiles['outputData']) write_data(df, csv_path, 
index=False)
        status = {'Status': 'Success', 'dataFilePath': IOFiles['outputData']}
        log.info(f'Selected data saved at {csv_path}')
        meta_data['featureengineering'] = {}
        meta_data['featureengineering']['Status'] = status
        write_json(meta_data, str(targetPath / IOFiles['metaData']))
        log.info(f'output: {status}')
        return json.dumps(status)
        """

    def getSuffixModules(self):
        modules = []
        return modules

    def addSuffixCode(self, indent=1):
        self.codeText += ""

    def getMainCodeModules(self):
        modules = [
            {'module':'json'}
            ,{'module':'logging'}
        ]
        return modules

    def addStatement(self, statement, indent=1):
        self.codeText += f"\n{self.tab * indent}{statement}"

    def getPipe(self):
        return self.pipe
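For orientation, here is a minimal usage sketch of this generator class, assuming the import path exposed by the package's __init__.py; the surrounding wiring (file header, utility module, requirements) that the deployment script adds for other stages is omitted.

# Illustrative sketch only: emit the featureSelector script text.
from mlac.timeseries.core.selector import selector

gen = selector()
gen.addLocalFunctionsCode()              # adds validateConfig()
gen.addPrefixCode()                      # adds the featureSelector() body
gen.addMainCode()                        # adds the __main__ entry point
script_text = gen.getInputOutputFiles()  # IOFiles = {...}
script_text += gen.getCode()             # validateConfig() + featureSelector() + __main__
print(script_text)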
utility.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .imports import importModule utility_functions = { 'load_data': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'transformer': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'selector': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'train': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'register': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'Prediction': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], 'drift': ['read_json','write_json','read_data','write_data','is_file_name_url','logger_class'], } #TODO convert read and write functions in to class functions functions_code = { 'read_json':{'imports':[{'mod':'json'}],'code':"\n\ \ndef read_json(file_path):\ \n data = None\ \n with open(file_path,'r') as f:\ \n data = json.load(f)\ \n return data\ \n"}, 'write_json':{'imports':[{'mod':'json'}],'code':"\n\ \ndef write_json(data, file_path):\ \n with open(file_path,'w') as f:\ \n json.dump(data, f)\ \n"}, 'read_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\ \ndef read_data(file_path, encoding='utf-8', sep=','):\ \n return pd.read_csv(file_path, encoding=encoding, sep=sep)\ \n"}, 'write_data':{'imports':[{'mod':'pandas','mod_as':'pd'}],'code':"\n\ \ndef write_data(data, file_path, index=False):\ \n return data.to_csv(file_path, index=index)\ \n\ \n#Uncomment and change below code for google storage\ \n#from google.cloud import storage\ \n#def write_data(data, file_path, index=False):\ \n# file_name= file_path.name\ \n# data.to_csv('output_data.csv')\ \n# storage_client = storage.Client()\ \n# bucket = storage_client.bucket('aion_data')\ \n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\ \n# return data\ \n"}, 'is_file_name_url':{'imports':[],'code':"\n\ \ndef is_file_name_url(file_name):\ \n supported_urls_starts_with = ('gs://','https://','http://')\ \n return file_name.startswith(supported_urls_starts_with)\ \n"}, 'logger_class':{'imports':[{'mod':'logging'}, {'mod':'io'}],'code':"\n\ \nclass logger():\ \n #setup the logger\ \n def __init__(self, log_file, mode='w', logger_name=None):\ \n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\ \n self.log = logging.getLogger(logger_name)\ \n\ \n #get logger\ \n def getLogger(self):\ \n return self.log\ \n\ \n def info(self, msg):\ \n self.log.info(msg)\ \n\ \n def error(self, msg, exc_info=False):\ \n self.log.error(msg,exc_info)\ \n\ \n # format and log dataframe\ \n def log_dataframe(self, df, rows=2, msg=None):\ \n buffer = io.StringIO()\ \n df.info(buf=buffer)\ \n log_text = 'Data frame{}'.format(' after ' + msg + ':' if msg else ':')\ \n log_text += '\\n\\t'+str(df.head(rows)).replace('\\n','\\n\\t')\ \n log_text 
+= ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\
\n        self.log.info(log_text)\
\n"},
}

class utility_function():
    def __init__(self, module):
        if module in utility_functions.keys():
            self.module_name = module
        else:
            self.module_name = None
        self.importer = importModule()
        self.codeText = ""

    def get_code(self):
        code = ""
        if self.module_name:
            functions = utility_functions[self.module_name]
            for function in functions:
                self.codeText += self.get_function_code(function)
            code = self.importer.getCode()
            code += self.codeText
        return code

    def get_function_code(self, name):
        code = ""
        if name in functions_code.keys():
            code += functions_code[name]['code']
            if self.importer:
                if 'imports' in functions_code[name].keys():
                    for module in functions_code[name]['imports']:
                        mod_name = module['mod']
                        mod_from = module.get('mod_from', None)
                        mod_as = module.get('mod_as', None)
                        self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as)
        return code

    def get_importer(self):
        return self.importer

if __name__ == '__main__':
    obj = utility_function('load_data')
    p = obj.get_code()
    print(p)
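A small usage sketch, assuming the package import path from __init__.py: utility_function bundles the shared read/write/logger helpers for a given stage, and its importer is what the deployment script merges into requirements.txt.

# Illustrative sketch only: generate the shared helper code for the 'selector' stage.
from mlac.timeseries.core.utility import utility_function

util = utility_function('selector')
helper_code = util.get_code()     # import lines + read_json/write_json/read_data/write_data/logger
importer = util.get_importer()    # can be merged with other importers for requirements.txt
print(helper_code)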
drift_analysis.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class drift(): def __init__(self, indent=0, tab_size=4): self.tab = " "*tab_size self.codeText = "" self.function_code = "" self.input_files = {} self.output_files = {} self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = utils.read_json(config_file)\ \n return config\ " return text def addLocalFunctionsCode(self): self.function_code += self.__addValidateConfigCode() def addPrefixCode(self, smaller_is_better=False, indent=1): self.codeText += """ def monitoring(config, targetPath, log): retrain = False last_run_id = 0 retrain_threshold = config.get('retrainThreshold', 100) meta_data_file = targetPath / IOFiles['metaData'] if meta_data_file.exists(): meta_data = utils.read_json(meta_data_file) if not meta_data.get('register', None): log.info('Last time Pipeline not executed properly') retrain = True else: last_run_id = meta_data['register']['runId'] df = utils.read_data(config['dataLocation']) df_len = len(df) if not meta_data['monitoring'].get('endIndex', None): meta_data['monitoring']['endIndex'] = int(meta_data['load_data']['Status']['Records']) meta_data['monitoring']['endIndexTemp'] = meta_data['monitoring']['endIndex'] if meta_data['register'].get('registered', False): meta_data['monitoring']['endIndex'] = meta_data['monitoring']['endIndexTemp'] meta_data['register']['registered'] = False #ack registery if (meta_data['monitoring']['endIndex'] + retrain_threshold) < df_len: meta_data['monitoring']['endIndexTemp'] = df_len retrain = True else: log.info('Pipeline running first time') meta_data = {} meta_data['monitoring'] = {} retrain = True if retrain: meta_data['monitoring']['runId'] = last_run_id + 1 meta_data['monitoring']['retrain'] = retrain utils.write_json(meta_data, targetPath/IOFiles['metaData']) status = {'Status':'Success','retrain': retrain, 'runId':meta_data['monitoring']['runId']} log.info(f'output: {status}') return json.dumps(status) """ def getMainCodeModules(self): modules = 
[{'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas','mod_as':'pd'} ,{'module':'json'} ] return modules def addMainCode(self, indent=1): self.codeText += """ if __name__ == '__main__': config = validateConfig() targetPath = Path('aion') / config['targetPath'] targetPath.mkdir(parents=True, exist_ok=True) log_file = targetPath / IOFiles['log'] log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: print(monitoring(config, targetPath, log)) except Exception as e: status = {'Status': 'Failure', 'Message': str(e)} print(json.dumps(status)) """ def addStatement(self, statement, indent=1): self.codeText += f"\n{self.tab * indent}{statement}" def getCode(self, indent=1): return self.function_code + '\n' + self.codeText
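The retrain trigger buried inside the generated monitoring() function is easier to read in isolation; the numbers below are invented for illustration.

# Illustrative sketch only: the check monitoring() uses to request retraining.
end_index = 1000          # meta_data['monitoring']['endIndex'] after the last registered model
retrain_threshold = 100   # config.get('retrainThreshold', 100)
current_records = 1150    # len(df) for the data at config['dataLocation']

retrain = (end_index + retrain_threshold) < current_records
print(retrain)            # True: more than retrain_threshold new records have arrived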
data_reader.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .imports import importModule supported_reader = ['sqlite', 'influx','s3'] functions_code = { 'dataReader':{'imports':[{'mod':'json'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':""" class dataReader(): def get_reader(self, reader_type, target_path=None, config=None): if reader_type == 'sqlite': return sqlite_writer(target_path=target_path) elif reader_type == 'influx': return Influx_writer(config=config) elif reader_type == 'gcs': return gcs(config=config) elif reader_type == 'azure': return azure(config=config) elif reader_type == 's3': return s3bucket(config=config) else: raise ValueError(reader_type) """ }, 'sqlite':{'imports':[{'mod':'sqlite3'},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None}],'code':"""\n\ class sqlite_writer(): def __init__(self, target_path): self.target_path = Path(target_path) database_file = self.target_path.stem + '.db' self.db = sqlite_db(self.target_path, database_file) def file_exists(self, file): if file: return self.db.table_exists(file) else: return False def read(self, file): return self.db.read(file) def write(self, data, file): self.db.write(data, file) def close(self): self.db.close() class sqlite_db(): def __init__(self, location, database_file=None): if not isinstance(location, Path): location = Path(location) if database_file: self.database_name = database_file else: self.database_name = location.stem + '.db' db_file = str(location/self.database_name) self.conn = sqlite3.connect(db_file) self.cursor = self.conn.cursor() self.tables = [] def table_exists(self, name): if name in self.tables: return True elif name: query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';" listOfTables = self.cursor.execute(query).fetchall() if len(listOfTables) > 0 : self.tables.append(name) return True return False def read(self, table_name): return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn) def create_table(self,name, columns, dtypes): query = f'CREATE TABLE IF NOT EXISTS {name} (' for column, data_type in zip(columns, dtypes): query += f"'{column}' TEXT," query = query[:-1] query += ');' self.conn.execute(query) return True def write(self,data, table_name): if not self.table_exists(table_name): self.create_table(table_name, data.columns, data.dtypes) tuple_data = list(data.itertuples(index=False, name=None)) insert_query = f'INSERT INTO {table_name} VALUES(' for i in range(len(data.columns)): insert_query += '?,' insert_query = insert_query[:-1] + ')' self.cursor.executemany(insert_query, tuple_data) self.conn.commit() return True def delete(self, name): pass def close(self): self.conn.close() """ }, 'influx':{'imports':[{'mod':'InfluxDBClient','mod_from':'influxdb'},{'mod': 'Path', 'mod_from': 'pathlib', 'mod_as': None},{'mod': 'pandas', 'mod_from': None, 'mod_as': 'pd'}],'code':"""\n\ class Influx_writer(): def __init__(self, config): self.db = influx_db(config) def 
file_exists(self, file): if file: return self.db.table_exists(file) else: return False def read(self, file): query = "SELECT * FROM {}".format(file) if 'read_time' in self.db_config.keys() and self.db_config['read_time']: query += f" time > now() - {self.db_config['read_time']}" return self.db.read(query) def write(self, data, file): self.db.write(data, file) def close(self): pass class influx_db(): def __init__(self, config): self.host = config['host'] self.port = config['port'] self.user = config.get('user', None) self.password = config.get('password', None) self.token = config.get('token', None) self.database = config['database'] self.measurement = config['measurement'] self.tags = config['tags'] self.client = self.get_client() def table_exists(self, name): query = f"SHOW MEASUREMENTS ON {self.database}" result = self.client(query) for measurement in result['measurements']: if measurement['name'] == name: return True return False def read(self, query)->pd.DataFrame: cursor = self.client.query(query) points = cursor.get_points() my_list=list(points) df=pd.DataFrame(my_list) return df def get_client(self): headers = None if self.token: headers={"Authorization": self.token} client = InfluxDBClient(self.host,self.port,self.user, self.password,headers=headers) databases = client.get_list_database() databases = [x['name'] for x in databases] if self.database not in databases: client.create_database(self.database) return InfluxDBClient(self.host,self.port,self.user,self.password,self.database,headers=headers) def write(self,data, measurement=None): if isinstance(data, pd.DataFrame): sorted_col = data.columns.tolist() sorted_col.sort() data = data[sorted_col] data = data.to_dict(orient='records') if not measurement: measurement = self.measurement for row in data: if 'time' in row.keys(): p = '%Y-%m-%dT%H:%M:%S.%fZ' time_str = datetime.strptime(row['time'], p) del row['time'] else: time_str = None if 'model_ver' in row.keys(): self.tags['model_ver']= row['model_ver'] del row['model_ver'] json_body = [{ 'measurement': measurement, 'time': time_str, 'tags': self.tags, 'fields': row }] self.client.write_points(json_body) def delete(self, name): pass def close(self): self.client.close() """ }, 's3':{'imports':[{'mod':'boto3'},{'mod': 'ClientError', 'mod_from': 'botocore.exceptions'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\ class s3bucket(): def __init__(self, config={}): if 's3' in config.keys(): config = config['s3'] aws_access_key_id = config.get('aws_access_key_id','') aws_secret_access_key = config.get('aws_secret_access_key','') bucket_name = config.get('bucket_name','') if not aws_access_key_id: raise ValueError('aws_access_key_id can not be empty') if not aws_secret_access_key: raise ValueError('aws_secret_access_key can not be empty') self.client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(aws_secret_access_key)) self.bucket_name = bucket_name def read(self, file_name): try: response = self.client.get_object(Bucket=self.bucket_name, Key=file_name) return pd.read_csv(response['Body']) except ClientError as ex: if ex.response['Error']['Code'] == 'NoSuchBucket': raise ValueError(f"Bucket '{self.bucket_name}' not found in aws s3 storage") elif ex.response['Error']['Code'] == 'NoSuchKey': raise ValueError(f"File '{file_name}' not found in s3 bucket '{self.bucket_name}'") else: raise """ }, 'azure':{'imports':[{'mod':'DataLakeServiceClient', 'mod_from':'azure.storage.filedatalake'},{'mod':'detect', 
'mod_from':'detect_delimiter'},{'mod':'pandavro', 'mod_as':'pdx'},{'mod':'io'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\ def azure(): def __init__(self,config={}): if 'azure' in config.keys(): config = config['azure'] account_name = config.get('account_name','') account_key = config.get('account_key','') container_name = config.get('container_name','') if not account_name: raise ValueError('Account name can not be empty') if not account_key: raise ValueError('Account key can not be empty') if not container_name: raise ValueError('Container name can not be empty') service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", account_name), credential=account_key) self.file_system_client = service_client.get_file_system_client(container_name) def read(self, directory_name): root_dir = str(directory_name) file_paths = self.file_system_client.get_paths(path=root_dir) main_df = pd.DataFrame() for path in file_paths: if not path.is_directory: file_client = file_system_client.get_file_client(path.name) file_ext = Path(path.name).suffix if file_ext in [".csv", ".tsv"]: with open(csv_local, "wb") as my_file: file_client.download_file().readinto(my_file) with open(csv_local, 'r') as file: data = file.read() row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t']) processed_df = pd.read_csv(csv_local, sep=row_delimiter) elif file_ext == ".parquet": stream = io.BytesIO() file_client.download_file().readinto(stream) processed_df = pd.read_parquet(stream, engine='pyarrow') elif file_ext == ".avro": with open(avro_local, "wb") as my_file: file_client.download_file().readinto(my_file) processed_df = pdx.read_avro(avro_local) if main_df.empty: main_df = pd.DataFrame(processed_df) else: main_df = main_df.append(processed_df, ignore_index=True) return main_df """ }, 'gcs':{'imports':[{'mod':'storage','mod_from':'google.cloud'},{'mod': 'Path', 'mod_from': 'pathlib'},{'mod': 'pandas', 'mod_as': 'pd'}],'code':"""\n\ class gcs(): def __init__(self, config={}): if 'gcs' in config.keys(): config = config['gcs'] account_key = config.get('account_key','') bucket_name = config.get('bucket_name','') if not account_key: raise ValueError('Account key can not be empty') if not bucket_name: raise ValueError('bucket name can not be empty') storage_client = storage.Client.from_service_account_json(account_key) self.bucket = storage_client.get_bucket(bucket_name) def read(self, bucket_name, file_name): data = self.bucket.blob(file_name).download_as_text() return pd.read_csv(data, encoding = 'utf-8', sep = ',') """ } } class data_reader(): def __init__(self, reader_type=[]): self.supported_readers = supported_reader if isinstance(reader_type, str): self.readers = [reader_type] elif not reader_type: self.readers = self.supported_readers else: self.readers = reader_type unsupported_reader = [ x for x in self.readers if x not in self.supported_readers] if unsupported_reader: raise ValueError(f"reader type '{unsupported_reader}' is not supported\nSupported readers are {self.supported_readers}") self.codeText = "" self.importer = importModule() def get_reader_code(self, readers): reader_code = { 'sqlite': 'return sqlite_writer(target_path=target_path)', 'influx': 'return Influx_writer(config=config)', 'gcs': 'return gcs(config=config)', 'azure': 'return azure(config=config)', 's3': 'return s3bucket(config=config)' } code = "\n\ndef dataReader(reader_type, target_path=None, config=None):\n" for i, reader in 
enumerate(readers): if not i: code += f" if reader_type == '{reader}':\n" else: code += f" elif reader_type == '{reader}':\n" code += f" {reader_code[reader]}\n" if readers: code += " else:\n" code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\n""" else: code += f""" raise ValueError("'{{reader_type}}' not added during code generation")\n""" return code def get_code(self): code = self.get_reader_code(self.readers) functions = [] for reader in self.readers: functions.append(reader) for function in functions: code += self.get_function_code(function) self.codeText += self.importer.getCode() self.codeText += code return self.codeText def get_function_code(self, name): code = "" if name in functions_code.keys(): code += functions_code[name]['code'] if self.importer: if 'imports' in functions_code[name].keys(): for module in functions_code[name]['imports']: mod_name = module['mod'] mod_from = module.get('mod_from', None) mod_as = module.get('mod_as', None) self.importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) return code def get_importer(self): return self.importer
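A short sketch of how this factory might be used, assuming the package path from __init__.py; reader types outside supported_reader raise a ValueError in the constructor.

# Illustrative sketch only: generate reader classes for the sqlite and s3 backends.
from mlac.timeseries.core.data_reader import data_reader

reader_gen = data_reader(['sqlite', 's3'])
reader_module_text = reader_gen.get_code()   # collected imports + dataReader() dispatcher + sqlite_writer + s3bucket
print(reader_module_text)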
imports.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from importlib.metadata import version import sys class importModule(): def __init__(self): self.importModule = {} self.stdlibModule = [] self.localModule = {} def addLocalModule(self,module, mod_from=None, mod_as=None): if module == '*': if module not in self.localModule.keys(): self.localModule[module]= [mod_from] else: self.localModule[module].append(mod_from) elif module not in self.localModule.keys(): self.localModule[module] = {'from':mod_from, 'as':mod_as} def addModule(self, module, mod_from=None, mod_as=None): if module not in self.importModule.keys(): self.importModule[module] = {'from':mod_from, 'as':mod_as} if module in sys.stdlib_module_names: self.stdlibModule.append(module) elif isinstance(self.importModule[module], list): if mod_as not in [x['as'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as not in [x['from'] for x in self.importModule[module]]: self.importModule[module].append({'from':mod_from, 'as':mod_as}) elif mod_as != self.importModule[module]['as']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list elif mod_from != self.importModule[module]['from']: as_list = [self.importModule[module]] as_list.append({'from':mod_from, 'as':mod_as}) self.importModule[module] = as_list def getModules(self): return (self.importModule, self.stdlibModule) def getBaseModule(self, extra_importers=[]): modules_alias = { 'sklearn':'scikit-learn', 'genetic_selection':'sklearn-genetic', 'google': 'google-cloud-storage', 'azure':'azure-storage-file-datalake'} local_modules = {'AIX':'/app/AIX-0.1-py3-none-any.whl'} modules = [] require = "" if extra_importers: extra_importers = [importer.importModule for importer in extra_importers if isinstance(importer, importModule)] importers_module = [self.importModule] + extra_importers for importer_module in importers_module: for k,v in importer_module.items(): if v['from']: mod = v['from'].split('.')[0] else: mod = k if mod in modules_alias.keys(): mod = modules_alias[mod] modules.append(mod) modules = list(set(modules)) for mod in modules: try: if mod in local_modules.keys(): require += f"{local_modules[mod]}\n" else: require += f"{mod}=={version(mod)}\n" except : if mod not in sys.stdlib_module_names: raise return require def getCode(self): def to_string(k, v): mod = '' if v['from']: mod += 'from {} '.format(v['from']) mod += 'import {}'.format(k) if v['as']: mod += ' as {} '.format(v['as']) return mod modules = "" local_modules = "" std_lib_modules = "" third_party_modules = "" for k,v in self.importModule.items(): if k in self.stdlibModule: std_lib_modules = std_lib_modules + '\n' + to_string(k, v) elif isinstance(v, dict): third_party_modules = third_party_modules + '\n' + to_string(k, v) elif isinstance(v, list): for alias in v: third_party_modules = third_party_modules + '\n' + to_string(k, alias) for k,v in self.localModule.items(): if k != '*': local_modules = local_modules + '\n' + 
to_string(k, v)
            else:
                for mod_from in v:
                    local_modules = local_modules + '\n' + f'from {mod_from} import {k}'
        if std_lib_modules:
            modules = modules + "\n#Standard Library modules" + std_lib_modules
        if third_party_modules:
            modules = modules + "\n\n#Third Party modules" + third_party_modules
        if local_modules:
            modules = modules + "\n\n#local modules" + local_modules + '\n'
        return modules

    def copyCode(self, importer):
        self.importModule, self.stdlibModule = importer.getModules()
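A small sketch of importModule in use, assuming the package path from __init__.py: it deduplicates modules, groups standard-library and third-party imports in getCode(), and getBaseModule() pins installed versions for requirements.txt.

# Illustrative sketch only: collect and render imports for a generated script.
from mlac.timeseries.core.imports import importModule

imp = importModule()
imp.addModule('json')                      # standard library
imp.addModule('pandas', mod_as='pd')       # third party, aliased
imp.addModule('Path', mod_from='pathlib')  # from-import
print(imp.getCode())         # grouped import statements
print(imp.getBaseModule())   # e.g. 'pandas==<installed version>' for requirements.txt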
transformer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class transformer(): def __init__(self, indent=0, tab_size=4): self.df_name = 'df' self.tab = ' ' * tab_size self.codeText = "" self.transformers = [] self.TxCols = [] self.imputers = {} self.input_files = {} self.output_files = {} self.function_code = '' self.addInputFiles({'inputData' : 'rawData.dat', 'metaData' : 'modelMetaData.json','log' : 'aion.log','transformedData' : 'transformedData.dat','normalization' : 'normalization.pkl'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n return config" return text def getPrefixModules(self): modules = [ {'module':'Path', 'mod_from':'pathlib'} ,{'module':'pandas', 'mod_as':'pd'} ,{'module':'warnings'} ,{'module':'json'} ,{'module':'logging'} ,{'module':'joblib'} ,{'module':'MinMaxScaler', 'mod_from':'sklearn.preprocessing'} ] return modules def addPrefixCode(self, indent=1): self.codeText += """ def transformation(config, targetPath, log): dataLoc = targetPath / IOFiles['inputData'] if not dataLoc.exists(): return {'Status': 'Failure', 'Message': 'Data location does not exists.'} df = read_data(dataLoc) log.log_dataframe(df) target_feature = config['target_feature'] dateTimeFeature=config['dateTimeFeature'] df.set_index(dateTimeFeature, inplace=True) df = df.dropna() df=df.fillna(df.mean()) if len(target_feature) == 1: trainX = df[target_feature].to_numpy().reshape(-1,1) else: trainX = df[target_feature].to_numpy() scaler = MinMaxScaler(feature_range=(0, 1)) trainX = scaler.fit_transform(trainX) normalization_file_name = str(targetPath / IOFiles['normalization']) joblib.dump(scaler, normalization_file_name) df[target_feature] = trainX log.log_dataframe(df) csv_path = str(targetPath / IOFiles['transformedData']) write_data(df, csv_path, index=True) status = {'Status': 'Success', 'DataFilePath': IOFiles['transformedData'], 'target_feature': target_feature,'dateTimeFeature':dateTimeFeature, "Normalization_file":normalization_file_name } meta_data['transformation'] = {} 
meta_data['transformation']['Status'] = status write_json(meta_data, str(targetPath / IOFiles['metaData'])) log.info(f'Transformed data saved at {csv_path}') log.info(f'output: {status}') return json.dumps(status) """ def getMainCodeModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'sys'} ,{'module':'json'} ,{'module':'logging'} ,{'module':'argparse'} ] return modules def addMainCode(self, indent=1): self.codeText += """ if __name__ == '__main__': config = validateConfig() targetPath = Path('aion') / config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') meta_data_file = targetPath / IOFiles['metaData'] if meta_data_file.exists(): meta_data = read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {meta_data_file}') log_file = targetPath / IOFiles['log'] log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: print(transformation(config, targetPath, log)) except Exception as e: status = {'Status': 'Failure', 'Message': str(e)} print(json.dumps(status)) """ def addValidateConfigCode(self, indent=1): self.function_code += self.__addValidateConfigCode() def addLocalFunctionsCode(self): self.addValidateConfigCode() def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self, indent=1): return self.function_code + '\n' + self.codeText def getDFName(self): return self.df_name
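For reference, a hypothetical config.json for the generated transformation step; the key names match what transformation() reads, while the values are invented.

# Illustrative sketch only: config keys consumed by the generated transformation() step.
transform_config = {
    "targetPath": "usecase_01",        # run folder resolved under ./aion/
    "target_feature": ["demand"],      # column(s) normalised with MinMaxScaler
    "dateTimeFeature": "timestamp",    # set as the DataFrame index before scaling
}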
functions.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ class global_function(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.codeText = "" self.available_functions = { 'iqr':{'name':'iqrOutlier','code':f"\n\ndef iqrOutlier(df):\ \n{self.tab}Q1 = df.quantile(0.25)\ \n{self.tab}Q3 = df.quantile(0.75)\ \n{self.tab}IQR = Q3 - Q1\ \n{self.tab}index = ~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)\ \n{self.tab}return index"}, 'zscore':{'name':'zscoreOutlier','imports':[{'mod':'stats','mod_from':'scipy'},{'mod':'numpy'}],'code':f"\n\ndef zscoreOutlier(df):\ \n{self.tab}z = numpy.abs(stats.zscore(df))\ \n{self.tab}index = (z < 3).all(axis=1)\ \n{self.tab}return index"}, 'iforest':{'name':'iforestOutlier','imports':[{'mod':'IsolationForest','mod_from':'sklearn.ensemble'}],'code':f"\n\ndef iforestOutlier(df):\ \n{self.tab}from sklearn.ensemble import IsolationForest\ \n{self.tab}isolation_forest = IsolationForest(n_estimators=100)\ \n{self.tab}isolation_forest.fit(df)\ \n{self.tab}y_pred_train = isolation_forest.predict(df)\ \n{self.tab}return y_pred_train == 1"}, 'minMaxImputer':{'name':'minMaxImputer','code':f"\n\nclass minMaxImputer(TransformerMixin):\ \n{self.tab}def __init__(self, strategy='max'):\ \n{self.tab}{self.tab}self.strategy = strategy\ \n{self.tab}def fit(self, X, y=None):\ \n{self.tab}{self.tab}self.feature_names_in_ = X.columns\ \n{self.tab}{self.tab}if self.strategy == 'min':\ \n{self.tab}{self.tab}{self.tab}self.statistics_ = X.min()\ \n{self.tab}{self.tab}else:\ \n{self.tab}{self.tab}{self.tab}self.statistics_ = X.max()\ \n{self.tab}{self.tab}return self\ \n{self.tab}def transform(self, X):\ \n{self.tab}{self.tab}import numpy\ \n{self.tab}{self.tab}return numpy.where(X.isna(), self.statistics_, X)"}, 'DummyEstimator':{'name':'DummyEstimator','code':f"\n\nclass DummyEstimator(BaseEstimator):\ \n{self.tab}def fit(self): pass\ \n{self.tab}def score(self): pass"}, 'start_reducer':{'name':'start_reducer','code':"\n\ \ndef start_reducer(df,target_feature,corr_threshold=0.85,var_threshold=0.05):\ \n import numpy as np\ \n import pandas as pd\ \n import itertools\ \n from sklearn.feature_selection import VarianceThreshold\ \n\ \n train_features = df.columns.tolist()\ \n train_features.remove(target_feature)\ \n df = df.loc[:, (df != df.iloc[0]).any()] #remove constant feature\ \n numeric_features = df.select_dtypes(include='number').columns.tolist()\ \n non_numeric_features = df.select_dtypes(exclude='number').columns.tolist()\ \n if numeric_features and var_threshold:\ \n qconstantFilter = VarianceThreshold(threshold=var_threshold)\ \n tempDf=df[numeric_features]\ \n qconstantFilter.fit(tempDf)\ \n numeric_features = [x for x,y in zip(numeric_features,qconstantFilter.get_support()) if y]\ \n if numeric_features:\ \n numColPairs = list(itertools.product(numeric_features, numeric_features))\ \n for item in numColPairs:\ \n if(item[0] == item[1]):\ \n numColPairs.remove(item)\ \n tempArray = []\ \n for item in numColPairs:\ \n tempCorr = np.abs(df[item[0]].corr(df[item[1]]))\ \n 
if(tempCorr > corr_threshold):\ \n tempArray.append(item[0])\ \n tempArray = np.unique(tempArray).tolist()\ \n nonsimilarNumericalCols = list(set(numeric_features) - set(tempArray))\ \n groupedFeatures = []\ \n if tempArray:\ \n corrDic = {}\ \n for feature in tempArray:\ \n temp = []\ \n for col in tempArray:\ \n tempCorr = np.abs(df[feature].corr(df[col]))\ \n temp.append(tempCorr)\ \n corrDic[feature] = temp\ \n #Similar correlation df\ \n corrDF = pd.DataFrame(corrDic,index = tempArray)\ \n corrDF.loc[:,:] = np.tril(corrDF, k=-1)\ \n alreadyIn = set()\ \n similarFeatures = []\ \n for col in corrDF:\ \n perfectCorr = corrDF[col][corrDF[col] > corr_threshold].index.tolist()\ \n if perfectCorr and col not in alreadyIn:\ \n alreadyIn.update(set(perfectCorr))\ \n perfectCorr.append(col)\ \n similarFeatures.append(perfectCorr)\ \n updatedSimFeatures = []\ \n for items in similarFeatures:\ \n if(target_feature != '' and target_feature in items):\ \n for p in items:\ \n updatedSimFeatures.append(p)\ \n else:\ \n updatedSimFeatures.append(items[0])\ \n newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols))\ \n updatedFeatures = list(set(newTempFeatures + non_numeric_features))\ \n else:\ \n updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))\ \n else:\ \n updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns))\ \n return updatedFeatures"}, 'feature_importance_class':{'name':'feature_importance_class','code':"\n\ \ndef feature_importance_class(df, numeric_features, cat_features,target_feature,pValTh,corrTh):\ \n import pandas as pd\ \n from sklearn.feature_selection import chi2\ \n from sklearn.feature_selection import f_classif\ \n from sklearn.feature_selection import mutual_info_classif\ \n \ \n impFeatures = []\ \n if cat_features:\ \n categoricalData=df[cat_features]\ \n chiSqCategorical=chi2(categoricalData,df[target_feature])[1]\ \n corrSeries=pd.Series(chiSqCategorical, index=cat_features)\ \n impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist())\ \n if numeric_features:\ \n quantData=df[numeric_features]\ \n fclassScore=f_classif(quantData,df[target_feature])[1]\ \n miClassScore=mutual_info_classif(quantData,df[target_feature])\ \n fClassSeries=pd.Series(fclassScore,index=numeric_features)\ \n miClassSeries=pd.Series(miClassScore,index=numeric_features)\ \n impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist())\ \n impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist())\ \n pearsonScore=df.corr() \ \n targetPScore=abs(pearsonScore[target_feature])\ \n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\ \n return list(set(sum(impFeatures, [])))"}, 'feature_importance_reg':{'name':'feature_importance_reg','code':"\n\ \ndef feature_importance_reg(df, numeric_features, target_feature,pValTh,corrTh):\ \n import pandas as pd\ \n from sklearn.feature_selection import f_regression\ \n from sklearn.feature_selection import mutual_info_regression\ \n \ \n impFeatures = []\ \n if numeric_features:\ \n quantData =df[numeric_features]\ \n fregScore=f_regression(quantData,df[target_feature])[1]\ \n miregScore=mutual_info_regression(quantData,df[target_feature])\ \n fregSeries=pd.Series(fregScore,index=numeric_features)\ \n miregSeries=pd.Series(miregScore,index=numeric_features)\ \n impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist())\ \n impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist())\ \n pearsonScore=df.corr()\ \n 
targetPScore=abs(pearsonScore[target_feature])\ \n impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist())\ \n return list(set(sum(impFeatures, [])))"}, 'scoring_criteria':{'name':'scoring_criteria','imports':[{'mod':'make_scorer','mod_from':'sklearn.metrics'},{'mod':'roc_auc_score','mod_from':'sklearn.metrics'}], 'code':"\n\ \ndef scoring_criteria(score_param, problem_type, class_count):\ \n if problem_type == 'classification':\ \n scorer_mapping = {\ \n 'recall':{'binary_class': 'recall', 'multi_class': 'recall_weighted'},\ \n 'precision':{'binary_class': 'precision', 'multi_class': 'precision_weighted'},\ \n 'f1_score':{'binary_class': 'f1', 'multi_class': 'f1_weighted'},\ \n 'roc_auc':{'binary_class': 'roc_auc', 'multi_class': 'roc_auc_ovr_weighted'}\ \n }\ \n if (score_param.lower() == 'roc_auc') and (class_count > 2):\ \n score_param = make_scorer(roc_auc_score, needs_proba=True,multi_class='ovr',average='weighted')\ \n else:\ \n class_type = 'binary_class' if class_count == 2 else 'multi_class'\ \n if score_param in scorer_mapping.keys():\ \n score_param = scorer_mapping[score_param][class_type]\ \n else:\ \n score_param = 'accuracy'\ \n return score_param"}, 'log_dataframe':{'name':'log_dataframe','code':f"\n\ \ndef log_dataframe(df, msg=None):\ \n import io\ \n buffer = io.StringIO()\ \n df.info(buf=buffer)\ \n if msg:\ \n log_text = f'Data frame after {{msg}}:'\ \n else:\ \n log_text = 'Data frame:'\ \n log_text += '\\n\\t'+str(df.head(2)).replace('\\n','\\n\\t')\ \n log_text += ('\\n\\t' + buffer.getvalue().replace('\\n','\\n\\t'))\ \n get_logger().info(log_text)"}, 'BayesSearchCV':{'name':'BayesSearchCV','imports':[{'mod':'cross_val_score','mod_from':'sklearn.model_selection'},{'mod':'fmin','mod_from':'hyperopt'},{'mod':'tpe','mod_from':'hyperopt'},{'mod':'hp','mod_from':'hyperopt'},{'mod':'STATUS_OK','mod_from':'hyperopt'},{'mod':'Trials','mod_from':'hyperopt'},{'mod':'numpy','mod_as':'np'}],'code':"\n\ \nclass BayesSearchCV():\ \n\ \n def __init__(self, estimator, params, scoring, n_iter, cv):\ \n self.estimator = estimator\ \n self.params = params\ \n self.scoring = scoring\ \n self.iteration = n_iter\ \n self.cv = cv\ \n self.best_estimator_ = None\ \n self.best_score_ = None\ \n self.best_params_ = None\ \n\ \n def __min_fun(self, params):\ \n score=cross_val_score(self.estimator, self.X, self.y,scoring=self.scoring,cv=self.cv)\ \n acc = score.mean()\ \n return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.estimator,'params': params}\ \n\ \n def fit(self, X, y):\ \n trials = Trials()\ \n self.X = X\ \n self.y = y\ \n best = fmin(self.__min_fun,self.params,algo=tpe.suggest, max_evals=self.iteration, trials=trials)\ \n result = sorted(trials.results, key = lambda x: x['loss'])[0]\ \n self.best_estimator_ = result['model']\ \n self.best_score_ = result['score']\ \n self.best_params_ = result['params']\ \n self.best_estimator_.fit(X, y)\ \n\ \n def hyperOptParamConversion( paramSpace):\ \n paramDict = {}\ \n for j in list(paramSpace.keys()):\ \n inp = paramSpace[j]\ \n isLog = False\ \n isLin = False\ \n isRan = False\ \n isList = False\ \n isString = False\ \n try:\ \n # check if functions are given as input and reassign paramspace\ \n v = paramSpace[j]\ \n if 'logspace' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isLog = True\ \n elif 'linspace' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isLin = True\ \n elif 'range' in paramSpace[j]:\ \n paramSpace[j] = 
v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isRan = True\ \n elif 'list' in paramSpace[j]:\ \n paramSpace[j] = v[v.find('(') + 1:v.find(')')].replace(' ', '')\ \n isList = True\ \n elif '[' and ']' in paramSpace[j]:\ \n paramSpace[j] = v.split('[')[1].split(']')[0].replace(' ', '')\ \n isList = True\ \n x = paramSpace[j].split(',')\ \n except:\ \n x = paramSpace[j]\ \n str_arg = paramSpace[j]\ \n\ \n # check if arguments are string\ \n try:\ \n test = eval(x[0])\ \n except:\ \n isString = True\ \n\ \n if isString:\ \n paramDict.update({j: hp.choice(j, x)})\ \n else:\ \n res = eval(str_arg)\ \n if isLin:\ \n y = eval('np.linspace' + str(res))\ \n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\ \n elif isLog:\ \n y = eval('np.logspace' + str(res))\ \n paramDict.update(\ \n {j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))})\ \n elif isRan:\ \n y = eval('np.arange' + str(res))\ \n paramDict.update({j: hp.choice(j, y)})\ \n # check datatype of argument\ \n elif isinstance(eval(x[0]), bool):\ \n y = list(map(lambda i: eval(i), x))\ \n paramDict.update({j: hp.choice(j, eval(str(y)))})\ \n elif isinstance(eval(x[0]), float):\ \n res = eval(str_arg)\ \n if len(str_arg.split(',')) == 3 and not isList:\ \n y = eval('np.linspace' + str(res))\ \n #print(y)\ \n paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))})\ \n else:\ \n y = list(res) if isinstance(res, tuple) else [res]\ \n paramDict.update({j: hp.choice(j, y)})\ \n else:\ \n res = eval(str_arg)\ \n if len(str_arg.split(',')) == 3 and not isList:\ \n y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))\ \n else:\ \n y = list(res) if isinstance(res, tuple) else [res]\ \n paramDict.update({j: hp.choice(j, y)})\ \n return paramDict"}, 's2n':{'name':'s2n','imports':[{'mod':'word2number','mod_as':'w2n'},{'mod':'numpy','mod_as':'np'}],'code':"\n\ \ndef s2n(value):\ \n try:\ \n x=eval(value)\ \n return x\ \n except:\ \n try:\ \n return w2n.word_to_num(value)\ \n except:\ \n return np.nan"}, 'readWrite':{'name':'readWrite','imports':[{'mod':'json'},{'mod':'pandas','mod_as':'pd'}],'code':"\n\ \ndef read_json(file_path):\ \n data = None\ \n with open(file_path,'r') as f:\ \n data = json.load(f)\ \n return data\ \n\ \ndef write_json(data, file_path):\ \n with open(file_path,'w') as f:\ \n json.dump(data, f)\ \n\ \ndef read_data(file_path, encoding='utf-8', sep=','):\ \n return pd.read_csv(file_path, encoding=encoding, sep=sep)\ \n\ \ndef write_data(data, file_path, index=False):\ \n return data.to_csv(file_path, index=index)\ \n\ \n#Uncomment and change below code for google storage\ \n#def write_data(data, file_path, index=False):\ \n# file_name= file_path.name\ \n# data.to_csv('output_data.csv')\ \n# storage_client = storage.Client()\ \n# bucket = storage_client.bucket('aion_data')\ \n# bucket.blob('prediction/'+file_name).upload_from_filename('output_data.csv', content_type='text/csv')\ \n# return data\ \n\ \ndef is_file_name_url(file_name):\ \n supported_urls_starts_with = ('gs://','https://','http://')\ \n return file_name.startswith(supported_urls_starts_with)\ \n"}, 'logger':{'name':'set_logger','imports':[{'mod':'logging'}],'code':f"\n\ \nlog = None\ \ndef set_logger(log_file, mode='a'):\ \n global log\ \n logging.basicConfig(filename=log_file, filemode=mode, format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')\ \n log = logging.getLogger(Path(__file__).parent.name)\ \n return log\ \n\ \ndef get_logger():\ \n return log\n"}, 
'mlflowSetPath':{'name':'mlflowSetPath','code':f"\n\ndef mlflowSetPath(path, name):\ \n{self.tab}db_name = str(Path(path)/'mlruns')\ \n{self.tab}mlflow.set_tracking_uri('file:///' + db_name)\ \n{self.tab}mlflow.set_experiment(str(Path(path).name))\ \n"}, 'mlflow_create_experiment':{'name':'mlflow_create_experiment','code':f"\n\ndef mlflow_create_experiment(config, path, name):\ \n{self.tab}tracking_uri, artifact_uri, registry_uri = get_mlflow_uris(config, path)\ \n{self.tab}mlflow.tracking.set_tracking_uri(tracking_uri)\ \n{self.tab}mlflow.tracking.set_registry_uri(registry_uri)\ \n{self.tab}client = mlflow.tracking.MlflowClient()\ \n{self.tab}experiment = client.get_experiment_by_name(name)\ \n{self.tab}if experiment:\ \n{self.tab}{self.tab}experiment_id = experiment.experiment_id\ \n{self.tab}else:\ \n{self.tab}{self.tab}experiment_id = client.create_experiment(name, artifact_uri)\ \n{self.tab}return client, experiment_id\ \n"}, 'get_mlflow_uris':{'name':'get_mlflow_uris','code':f"\n\ndef get_mlflow_uris(config, path):\ \n artifact_uri = None\ \n tracking_uri_type = config.get('tracking_uri_type',None)\ \n if tracking_uri_type == 'localDB':\ \n tracking_uri = 'sqlite:///' + str(path.resolve()/'mlruns.db')\ \n elif tracking_uri_type == 'server' and config.get('tracking_uri', None):\ \n tracking_uri = config['tracking_uri']\ \n if config.get('artifacts_uri', None):\ \n if Path(config['artifacts_uri']).exists():\ \n artifact_uri = 'file:' + config['artifacts_uri']\ \n else:\ \n artifact_uri = config['artifacts_uri']\ \n else:\ \n artifact_uri = 'file:' + str(path.resolve()/'mlruns')\ \n else:\ \n tracking_uri = 'file:' + str(path.resolve()/'mlruns')\ \n artifact_uri = None\ \n if config.get('registry_uri', None):\ \n registry_uri = config['registry_uri']\ \n else:\ \n registry_uri = 'sqlite:///' + str(path.resolve()/'registry.db')\ \n return tracking_uri, artifact_uri, registry_uri\ \n"}, 'logMlflow':{'name':'logMlflow','code':f"\n\ndef logMlflow( params, metrices, estimator,tags={{}}, algoName=None):\ \n{self.tab}run_id = None\ \n{self.tab}for k,v in params.items():\ \n{self.tab}{self.tab}mlflow.log_param(k, v)\ \n{self.tab}for k,v in metrices.items():\ \n{self.tab}{self.tab}mlflow.log_metric(k, v)\ \n{self.tab}if 'CatBoost' in algoName:\ \n{self.tab}{self.tab}model_info = mlflow.catboost.log_model(estimator, 'model')\ \n{self.tab}else:\ \n{self.tab}{self.tab}model_info = mlflow.sklearn.log_model(sk_model=estimator, artifact_path='model')\ \n{self.tab}tags['processed'] = 'no'\ \n{self.tab}tags['registered'] = 'no'\ \n{self.tab}mlflow.set_tags(tags)\ \n{self.tab}if model_info:\ \n{self.tab}{self.tab}run_id = model_info.run_id\ \n{self.tab}return run_id\ \n"}, 'classification_metrices':{'name':'classification_metrices','imports':[{'mod':'sklearn'},{'mod':'math'}],'code':"\ndef get_classification_metrices( actual_values, predicted_values):\ \n result = {}\ \n accuracy_score = sklearn.metrics.accuracy_score(actual_values, predicted_values)\ \n avg_precision = sklearn.metrics.precision_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_recall = sklearn.metrics.recall_score(actual_values, predicted_values,\ \n average='macro')\ \n avg_f1 = sklearn.metrics.f1_score(actual_values, predicted_values,\ \n average='macro')\ \n\ \n result['accuracy'] = math.floor(accuracy_score*10000)/100\ \n result['precision'] = math.floor(avg_precision*10000)/100\ \n result['recall'] = math.floor(avg_recall*10000)/100\ \n result['f1'] = math.floor(avg_f1*10000)/100\ \n return result\ \n"}, 
'regression_metrices':{'name':'regression_metrices','imports':[{'mod':'numpy', 'mod_as':'np'}],'code':"\ndef get_regression_metrices( actual_values, predicted_values):\ \n result = {}\ \n\ \n me = np.mean(predicted_values - actual_values)\ \n sde = np.std(predicted_values - actual_values, ddof = 1)\ \n\ \n abs_err = np.abs(predicted_values - actual_values)\ \n mae = np.mean(abs_err)\ \n sdae = np.std(abs_err, ddof = 1)\ \n\ \n abs_perc_err = 100.*np.abs(predicted_values - actual_values) / actual_values\ \n mape = np.mean(abs_perc_err)\ \n sdape = np.std(abs_perc_err, ddof = 1)\ \n\ \n result['mean_error'] = me\ \n result['mean_abs_error'] = mae\ \n result['mean_abs_perc_error'] = mape\ \n result['error_std'] = sde\ \n result['abs_error_std'] = sdae\ \n result['abs_perc_error_std'] = sdape\ \n return result\ \n"} } def add_function(self, name, importer=None): if name in self.available_functions.keys(): self.codeText += self.available_functions[name]['code'] if importer: if 'imports' in self.available_functions[name].keys(): for module in self.available_functions[name]['imports']: mod_name = module['mod'] mod_from = module.get('mod_from', None) mod_as = module.get('mod_as', None) importer.addModule(mod_name, mod_from=mod_from, mod_as=mod_as) def get_function_name(self, name): if name in self.available_functions.keys(): return self.available_functions[name]['name'] return None def getCode(self): return self.codeText
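A brief sketch of how a snippet from this catalogue might be pulled into generated code, with its imports registered on an importModule instance (import paths assumed from __init__.py).

# Illustrative sketch only: add the scoring_criteria helper and its imports to generated code.
from mlac.timeseries.core.functions import global_function
from mlac.timeseries.core.imports import importModule

importer = importModule()
funcs = global_function()
funcs.add_function('scoring_criteria', importer=importer)   # registers make_scorer/roc_auc_score imports
generated = importer.getCode() + funcs.getCode()
print(funcs.get_function_name('scoring_criteria'))          # 'scoring_criteria'
print(generated)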
register.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class register(): def __init__(self, importer, indent=0, tab_size=4): self.tab = " "*tab_size self.codeText = "" self.function_code = "" self.importer = importer self.input_files = {} self.output_files = {} self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json','metrics': 'metrics.json','production': 'production.json'}) def addInputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def addOutputFiles(self, files): if not isinstance(files, dict): raise TypeError(f"Required dict type got {type(files)} type") for k,v in files.items(): self.input_files[k] = v def getInputFiles(self): text = 'IOFiles = ' if not self.input_files: text += '{ }' else: text += json.dumps(self.input_files, indent=4) return text def getOutputFiles(self): text = 'output_file = ' if not self.output_files: text += '{ }' else: text += json.dumps(self.output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self, models=None): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = utils.read_json(config_file)\ \n return config\ " return text def addLocalFunctionsCode(self, models): self.function_code += self.__addValidateConfigCode(models) def addPrefixCode(self, smaller_is_better=False, indent=1): compare = 'min' if smaller_is_better else 'max' self.codeText += f""" def get_best_model(run_path): models_path = [d for d in run_path.iterdir() if d.is_dir] scores = {{}} for model in models_path: metrics = utils.read_json(model/IOFiles['metrics']) if metrics.get('score', None): scores[model.stem] = metrics['score'] best_model = {compare}(scores, key=scores.get) return best_model def __merge_logs(log_file_sequence,path, files): if log_file_sequence['first'] in files: with open(path/log_file_sequence['first'], 'r') as f: main_log = f.read() files.remove(log_file_sequence['first']) for file in files: with open(path/file, 'r') as f: main_log = main_log + f.read() (path/file).unlink() with open(path/log_file_sequence['merged'], 'w') as f: f.write(main_log) def merge_log_files(folder, models): log_file_sequence = {{ 'first': 'aion.log', 'merged': 'aion.log' }} log_file_suffix = '_aion.log' log_files = [x+log_file_suffix for x in models if (folder/(x+log_file_suffix)).exists()] log_files.append(log_file_sequence['first']) __merge_logs(log_file_sequence, folder, log_files) def register(config, targetPath, log): meta_data_file = targetPath / IOFiles['metaData'] if meta_data_file.exists(): meta_data = utils.read_json(meta_data_file) else: raise ValueError(f'Configuration file not found: {{meta_data_file}}') run_id = meta_data['monitoring']['runId'] usecase = 
config['targetPath'] current_run_path = targetPath/'runs'/str(run_id) register_model_name = get_best_model(current_run_path) models = config['models'] merge_log_files(targetPath, models) meta_data['register'] = {{'runId':run_id, 'model': register_model_name}} utils.write_json(meta_data, targetPath/IOFiles['metaData']) utils.write_json({{'Model':register_model_name,'runNo':str(run_id)}}, targetPath/IOFiles['production']) status = {{'Status':'Success','Message':f'Model Registered: {{register_model_name}}'}} log.info(f'output: {{status}}') return json.dumps(status) """ def getMainCodeModules(self): modules = [{'module':'Path', 'mod_from':'pathlib'} ,{'module':'json'} ] return modules def addMainCode(self, models, indent=1): self.codeText += """ if __name__ == '__main__': config = validateConfig() targetPath = Path('aion') / config['targetPath'] if not targetPath.exists(): raise ValueError(f'targetPath does not exist') log_file = targetPath / IOFiles['log'] log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: print(register(config, targetPath, log)) except Exception as e: status = {'Status': 'Failure', 'Message': str(e)} print(json.dumps(status)) """ def addStatement(self, statement, indent=1): self.codeText += f"\n{self.tab * indent}{statement}" def getCode(self, indent=1): return self.function_code + '\n' + self.codeText
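A minimal sketch of assembling the registration script; the model names are hypothetical, and the importer argument mirrors how the other stage generators are wired.

# Illustrative sketch only: emit the model-registration script text.
from mlac.timeseries.core.imports import importModule
from mlac.timeseries.core.register import register

models = ['model_a', 'model_b']              # hypothetical trained model names
gen = register(importModule())
gen.addLocalFunctionsCode(models)            # adds validateConfig()
gen.addPrefixCode(smaller_is_better=True)    # pick the minimum score (e.g. an error metric)
gen.addMainCode(models)                      # adds the __main__ entry point
print(gen.getInputOutputFiles() + gen.getCode())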
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from mlac.timeseries.core.imports import importModule from mlac.timeseries.core.load_data import tabularDataReader from mlac.timeseries.core.transformer import transformer as profiler from mlac.timeseries.core.selector import selector from mlac.timeseries.core.trainer import learner from mlac.timeseries.core.register import register from mlac.timeseries.core.deploy import deploy from mlac.timeseries.core.drift_analysis import drift from mlac.timeseries.core.functions import global_function from mlac.timeseries.core.data_reader import data_reader from mlac.timeseries.core.utility import utility_function
load_data.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import json class tabularDataReader(): def __init__(self, tab_size=4): self.tab = ' ' * tab_size self.function_code = '' self.codeText = '' self.code_generated = False def getInputFiles(self): IOFiles = { "rawData": "rawData.dat", "metaData" : "modelMetaData.json", "log" : "aion.log", "outputData" : "rawData.dat", "monitoring":"monitoring.json", "prodData": "prodData", "prodDataGT":"prodDataGT" } text = 'IOFiles = ' if not IOFiles: text += '{ }' else: text += json.dumps(IOFiles, indent=4) return text def getOutputFiles(self): output_files = { 'metaData' : 'modelMetaData.json', 'log' : 'aion.log', 'outputData' : 'rawData.dat' } text = 'output_file = ' if not output_files: text += '{ }' else: text += json.dumps(output_files, indent=4) return text def getInputOutputFiles(self, indent=0): text = '\n' text += self.getInputFiles() if indent: text = text.replace('\n', self.tab * indent + '\n') return text def __addValidateConfigCode(self): text = "\n\ \ndef validateConfig():\ \n config_file = Path(__file__).parent/'config.json'\ \n if not Path(config_file).exists():\ \n raise ValueError(f'Config file is missing: {config_file}')\ \n config = read_json(config_file)\ \n if not config['targetPath']:\ \n raise ValueError(f'Target Path is not configured')\ \n return config" return text def addMainCode(self, indent=1): self.codeText += """ if __name__ == '__main__': config = validateConfig() targetPath = Path('aion') / config['targetPath'] targetPath.mkdir(parents=True, exist_ok=True) if not targetPath.exists(): raise ValueError(f'targetPath does not exist') meta_data_file = targetPath / IOFiles['metaData'] if not meta_data_file.exists(): raise ValueError(f'Configuration file not found: {meta_data_file}') log_file = targetPath / IOFiles['log'] log = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem) try: print(load_data(config, targetPath, log)) except Exception as e: status = {'Status': 'Failure', 'Message': str(e)} print(json.dumps(status)) """ def addLoadDataCode(self): self.codeText += """ #This function will read the data and save the data on persistent storage def load_data(config, targetPath, log): meta_data_file = targetPath / IOFiles['metaData'] meta_data = read_json(meta_data_file) if meta_data.get('monitoring', False) and not meta_data['monitoring'].get('retrain', False): raise ValueError('New data is not enougth to retrain model') df = read_data(config['dataLocation']) status = {} output_data_path = targetPath / IOFiles['outputData'] log.log_dataframe(df) required_features = list(set(config['selected_features'] + config['dateTimeFeature'] + config['target_feature'])) log.info('Dataset features required: ' + ','.join(required_features)) missing_features = [x for x in required_features if x not in df.columns.tolist()] if missing_features: raise ValueError(f'Some feature/s is/are missing: {missing_features}') log.info('Removing unused features: ' + ','.join(list(set(df.columns) - set(required_features)))) df = df[required_features] log.info(f'Required 
features: {required_features}') try: log.info(f'Saving Dataset: {str(output_data_path)}') write_data(df, output_data_path, index=False) status = {'Status': 'Success', 'DataFilePath': IOFiles['outputData'], 'Records': len(df)} except: raise ValueError('Unable to create data file') meta_data['load_data'] = {} meta_data['load_data']['selected_features'] = [x for x in config['selected_features'] if x != config['target_feature']] meta_data['load_data']['Status'] = status write_json(meta_data, meta_data_file) output = json.dumps(status) log.info(output) return output """ def addValidateConfigCode(self, indent=1): self.function_code += self.__addValidateConfigCode() def addLocalFunctionsCode(self): self.addValidateConfigCode() def addStatement(self, statement, indent=1): self.codeText += '\n' + self.tab * indent + statement def getCode(self): return self.function_code + '\n' + self.codeText
deploy.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.timeseries.core import * from .utility import * def get_deploy_params(config): param_keys = ["modelVersion","problem_type","target_feature","lag_order","noofforecasts"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] data['ipAddress'] = '127.0.0.1' data['portNo'] = '8094' return data def import_trainer_module(importer): non_sklearn_modules = get_variable('non_sklearn_modules') if non_sklearn_modules: for mod in non_sklearn_modules: module = get_module_mapping(mod) mod_from = module.get('mod_from',None) mod_as = module.get('mod_as',None) importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) imported_modules = [ ] def run_deploy(config): generated_files = [] importer = importModule() deployer = deploy() importModules(importer, imported_modules) usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelServing' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('utility', mod_as='utils') utility_obj = utility_function('Prediction') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file required for creating a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") importModules(importer,deployer.getPredictionCodeModules()) code = file_header(usecase) code += importer.getCode() code += deployer.getInputOutputFiles() deployer.addPredictionCode() code += deployer.getCode() # create prediction file with open(deploy_path/"predict.py", 'w') as f: f.write(code) generated_files.append("predict.py") # create create service file with open(deploy_path/"aionCode.py", 'w') as f: f.write(file_header(usecase) + deployer.getServiceCode()) generated_files.append("aionCode.py") importer.addModule('seaborn') importer.addModule('sklearn') # create requirements file req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") # create config file config_file = deploy_path/"config.json" config_data = get_deploy_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") # create docker file create_docker_file('Prediction', deploy_path,config['modelName'], generated_files)
trainer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.timeseries.core import * from mlac.timeseries.app import utility as utils def get_model_name(algo, method): if method == 'modelBased': return algo + '_' + 'MLBased' if method == 'statisticalBased': return algo + '_' + 'StatisticsBased' else: return algo def get_training_params(config, algo): param_keys = ["modelVersion","problem_type","target_feature","train_features","scoring_criteria","test_ratio","optimization_param","dateTimeFeature"]#BugID:13217 data = {key:value for (key,value) in config.items() if key in param_keys} data['algorithms'] = {algo: config['algorithms'][algo]} data['targetPath'] = config['modelName'] return data def update_score_comparer(scorer): smaller_is_better_scorer = ['neg_mean_squared_error','mse','neg_root_mean_squared_error','rmse','neg_mean_absolute_error','mae'] if scorer.lower() in smaller_is_better_scorer: utils.update_variable('smaller_is_better', True) else: utils.update_variable('smaller_is_better', False) def run_trainer(config): trainer = learner() importer = importModule() function = global_function() utils.importModules(importer,trainer.getPrefixModules()) update_score_comparer(config['scoring_criteria']) model_name = list(config['algorithms'].keys())[0] if model_name == 'MLP': utils.importModules(importer,trainer.getMlpCodeModules()) trainer.addMlpCode() elif model_name == 'LSTM': utils.importModules(importer,trainer.getLstmCodeModules()) trainer.addLstmCode() trainer.addMainCode() usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name) deploy_path.mkdir(parents=True, exist_ok=True) generated_files = [] # create the utility file importer.addLocalModule('utility', mod_as='utils') utility_obj = utility_function('train') with open(deploy_path/"utility.py", 'w') as f: f.write(utils.file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(utils.file_header(usecase)) generated_files.append("__init__.py") importer.addModule("warnings") code = importer.getCode() code += 'warnings.filterwarnings("ignore")\n' code += f"\nmodel_name = '{model_name}'\n" utils.append_variable('models_name',model_name) out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','metrics':'metrics.json','metaDataOutput':f'{model_name}_modelMetaData.json','production':'production.json'} trainer.addOutputFiles(out_files) code += trainer.getInputOutputFiles() code += function.getCode() trainer.addLocalFunctionsCode() code += trainer.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") with open (deploy_path/"config.json", "w") as f: 
json.dump(get_training_params(config, model_name), f, indent=4) generated_files.append("config.json") utils.create_docker_file('train', deploy_path,config['modelName'], generated_files)
selector.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json import platform from mlac.timeseries.core import * from .utility import * output_file_map = { 'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'} } def get_selector_params(config): param_keys = ["modelVersion","problem_type","target_feature","train_features","cat_features","n_components"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_selector(config): select = selector() importer = importModule() function = global_function() importModules(importer,select.getPrefixModules()) importModules(importer, select.getSuffixModules()) importModules(importer, select.getMainCodeModules()) select.addPrefixCode() select.addSuffixCode() select.addMainCode() generated_files = [] usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('selector') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = file_header(usecase) code += importer.getCode() code += select.getInputOutputFiles() code += function.getCode() select.addLocalFunctionsCode() code += select.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") config_file = deploy_path/"config.json" config_data = get_selector_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") create_docker_file('selector', deploy_path,config['modelName'], generated_files)
utility.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ import datetime from pathlib import Path variables = {} def update_variable(name, value): variables[name] = value def get_variable(name, default=None): return variables.get(name, default) def append_variable(name, value): data = get_variable(name) if not data: update_variable(name, [value]) elif not isinstance(data, list): update_variable(name, [data, value]) else: data.append(value) update_variable(name, data) def addDropFeature(feature, features_list, coder, indent=1): coder.addStatement(f'if {feature} in {features_list}:', indent=indent) coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1) def importModules(importer, modules_list): for module in modules_list: mod_from = module.get('mod_from',None) mod_as = module.get('mod_as',None) importer.addModule(module['module'], mod_from=mod_from, mod_as=mod_as) def file_header(use_case, module_name=None): time_str = datetime.datetime.now().isoformat(timespec='seconds', sep=' ') text = "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n" return text + f"'''\nThis file is automatically generated by AION for {use_case} usecase.\nFile generation time: {time_str}\n'''" def get_module_mapping(module): mapping = { "LogisticRegression": {'module':'LogisticRegression', 'mod_from':'sklearn.linear_model'} ,"GaussianNB": {'module':'GaussianNB', 'mod_from':'sklearn.naive_bayes'} ,"DecisionTreeClassifier": {'module':'DecisionTreeClassifier', 'mod_from':'sklearn.tree'} ,"SVC": {'module':'SVC', 'mod_from':'sklearn.svm'} ,"KNeighborsClassifier": {'module':'KNeighborsClassifier', 'mod_from':'sklearn.neighbors'} ,"GradientBoostingClassifier": {'module':'GradientBoostingClassifier', 'mod_from':'sklearn.ensemble'} ,'RandomForestClassifier':{'module':'RandomForestClassifier','mod_from':'sklearn.ensemble'} ,'XGBClassifier':{'module':'XGBClassifier','mod_from':'xgboost'} ,'LGBMClassifier':{'module':'LGBMClassifier','mod_from':'lightgbm'} ,'CatBoostClassifier':{'module':'CatBoostClassifier','mod_from':'catboost'} ,"LinearRegression": {'module':'LinearRegression', 'mod_from':'sklearn.linear_model'} ,"Lasso": {'module':'Lasso', 'mod_from':'sklearn.linear_model'} ,"Ridge": {'module':'Ridge', 'mod_from':'sklearn.linear_model'} ,"DecisionTreeRegressor": {'module':'DecisionTreeRegressor', 'mod_from':'sklearn.tree'} ,'RandomForestRegressor':{'module':'RandomForestRegressor','mod_from':'sklearn.ensemble'} ,'XGBRegressor':{'module':'XGBRegressor','mod_from':'xgboost'} ,'LGBMRegressor':{'module':'LGBMRegressor','mod_from':'lightgbm'} ,'CatBoostRegressor':{'module':'CatBoostRegressor','mod_from':'catboost'} } return mapping.get(module, None) def create_docker_file(name, path,usecasename,files=[],text_feature=False): text = "" if name == 'load_data': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 
'transformer': text='FROM python:3.8-slim-buster\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' if text_feature: text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl\n' text+='\n' text+='''RUN \ ''' if text_feature: text += ''' git && pip install requests && pip install git+https://github.com/MCFreddie777/language-check.git\ && ''' text+=''' pip install --no-cache-dir -r requirements.txt\ ''' if text_feature: text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\ ''' text+='\n' elif name == 'selector': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 'train': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 'register': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' elif name == 'Prediction': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' if text_feature: text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3-none-any.whl\n' text+='''RUN \ ''' if text_feature: text += ''' git && pip install requests && pip install git+https://github.com/MCFreddie777/language-check.git\ && ''' text+='''pip install --no-cache-dir -r requirements.txt\ ''' if text_feature: text += ''' && python -m nltk.downloader stopwords && python -m nltk.downloader punkt && python -m nltk.downloader wordnet && python -m nltk.downloader averaged_perceptron_tagger\ ''' text+='\n' text+='ENTRYPOINT ["python", "aionCode.py","-ip","0.0.0.0","-pn","8094"]\n' elif name == 'input_drift': text='FROM python:3.8-slim-buster' text+='\n' text+='LABEL "usecase"="'+str(usecasename)+'"' text+='\n' text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"' text+='\n' for file in files: text+=f'\nCOPY {file} {file}' text+='\n' text+='RUN pip install --no-cache-dir -r requirements.txt' file_name = Path(path)/'Dockerfile' with open(file_name, 'w') as f: f.write(text)
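A small usage sketch for the helpers in utility.py, assuming the module resolves to mlac.timeseries.app.utility (as trainer.py imports it); the use case name and output directory are made-up values for illustration.

# Hedged sketch of the utility helpers; usecase name and output directory are placeholders.
from pathlib import Path
from mlac.timeseries.app.utility import file_header, get_module_mapping, create_docker_file

usecase = 'sales_forecast_1'                 # hypothetical '<modelName>_<modelVersion>'
out_dir = Path('/tmp/mlac_demo')
out_dir.mkdir(parents=True, exist_ok=True)

print(file_header(usecase))                  # shebang, encoding line and auto-generation banner
print(get_module_mapping('XGBClassifier'))   # {'module': 'XGBClassifier', 'mod_from': 'xgboost'}

# Writes out_dir/Dockerfile: python:3.8-slim-buster base, COPY of the listed files,
# and pip install of requirements.txt (the 'load_data' branch above).
create_docker_file('load_data', out_dir, 'sales_forecast', ['aionCode.py', 'requirements.txt'])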
drift_analysis.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.timeseries.core import * from .utility import * def get_drift_params(config): param_keys = ["modelVersion","problem_type","retrainThreshold","dataLocation"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_drift_analysis(config): importer = importModule() monitor = drift() monitor.addLocalFunctionsCode() monitor.addPrefixCode() monitor.addMainCode() importModules(importer, monitor.getMainCodeModules()) importer.addModule('warnings') generated_files = [] usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelMonitoring' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('utility', mod_as='utils') utility_obj = utility_function('load_data') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file required for creating a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = importer.getCode() code += '\nwarnings.filterwarnings("ignore")\n' code += monitor.getInputOutputFiles() code += monitor.getCode() # create serving file with open(deploy_path/"aionCode.py", 'w') as f: f.write(file_header(usecase) + code) generated_files.append("aionCode.py") # create requirements file req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") # create config file with open (deploy_path/"config.json", "w") as f: json.dump(get_drift_params(config), f, indent=4) generated_files.append("config.json") # create docker file create_docker_file('input_drift', deploy_path,config['modelName'], generated_files)
transformer.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json import platform from mlac.timeseries.core import * from .utility import * output_file_map = { 'text' : {'text' : 'text_profiler.pkl'}, 'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'}, 'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'}, 'normalizer' : {'normalizer' : 'normalizer.pkl'} } def add_common_imports(importer): common_importes = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'argparse', 'mod_from': None, 'mod_as': None}, {'module': 'platform', 'mod_from': None, 'mod_as': None } ] for mod in common_importes: importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as']) def get_transformer_params(config): param_keys = ["modelVersion","problem_type","target_feature","train_features","text_features","profiler","test_ratio","dateTimeFeature"] #BugID:13217 data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_transformer(config): transformer = profiler() importer = importModule() function = global_function() importModules(importer, transformer.getPrefixModules()) importer.addModule('warnings') transformer.addPrefixCode() importModules(importer, transformer.getMainCodeModules()) transformer.addMainCode() usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation' deploy_path.mkdir(parents=True, exist_ok=True) generated_files = [] # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('transformer') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = file_header(usecase) code += "\nimport os\nos.path.abspath(os.path.join(__file__, os.pardir))\n" #chdir to import from current dir code += importer.getCode() code += '\nwarnings.filterwarnings("ignore")\n' code += transformer.getInputOutputFiles() code += function.getCode() transformer.addLocalFunctionsCode() code += transformer.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") config_file = deploy_path/"config.json" config_data = get_transformer_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") create_docker_file('transformer', deploy_path,config['modelName'], generated_files)
register.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json from mlac.timeseries.core import * from .utility import * def get_register_params(config, models): param_keys = ["modelVersion","problem_type"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] data['models'] = models return data def run_register(config): importer = importModule() registration = register(importer) models = get_variable('models_name') smaller_is_better = get_variable('smaller_is_better', False) registration.addLocalFunctionsCode(models) registration.addPrefixCode(smaller_is_better) registration.addMainCode(models) importModules(importer, registration.getMainCodeModules()) importer.addModule('warnings') generated_files = [] usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('utility', mod_as='utils') utility_obj = utility_function('register') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create empty init file required for creating a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = importer.getCode() code += '\nwarnings.filterwarnings("ignore")\n' code += registration.getInputOutputFiles() code += registration.getCode() # create serving file with open(deploy_path/"aionCode.py", 'w') as f: f.write(file_header(usecase) + code) generated_files.append("aionCode.py") # create requirements file req_file = deploy_path/"requirements.txt" with open(req_file, "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") # create config file with open (deploy_path/"config.json", "w") as f: json.dump(get_register_params(config, models), f, indent=4) generated_files.append("config.json") # create docker file create_docker_file('register', deploy_path,config['modelName'], generated_files)
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .load_data import run_loader from .transformer import run_transformer from .selector import run_selector from .trainer import run_trainer from .register import run_register from .deploy import run_deploy from .drift_analysis import run_drift_analysis
load_data.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from pathlib import Path import json import platform from mlac.timeseries.core import * from .utility import * imported_modules = [ {'module': 'json', 'mod_from': None, 'mod_as': None}, {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}, {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}, {'module': 'argparse', 'mod_from': None, 'mod_as': None}, {'module': 'platform', 'mod_from': None, 'mod_as': None } ] def get_load_data_params(config): param_keys = ["modelVersion","problem_type","target_feature","selected_features","dateTimeFeature","dataLocation"] data = {key:value for (key,value) in config.items() if key in param_keys} data['targetPath'] = config['modelName'] return data def run_loader(config): generated_files = [] importer = importModule() loader = tabularDataReader() importModules(importer, imported_modules) usecase = config['modelName']+'_'+config['modelVersion'] deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion' deploy_path.mkdir(parents=True, exist_ok=True) # create the utility file importer.addLocalModule('*', mod_from='utility') utility_obj = utility_function('load_data') with open(deploy_path/"utility.py", 'w') as f: f.write(file_header(usecase) + utility_obj.get_code()) generated_files.append("utility.py") # create the production data reader file importer.addLocalModule('dataReader', mod_from='data_reader') readers = ['sqlite','influx'] if 's3' in config.keys(): readers.append('s3') reader_obj = data_reader(readers) with open(deploy_path/"data_reader.py", 'w') as f: f.write(file_header(usecase) + reader_obj.get_code()) generated_files.append("data_reader.py") # create empty init file to make a package with open(deploy_path/"__init__.py", 'w') as f: f.write(file_header(usecase)) generated_files.append("__init__.py") code = file_header(usecase) code += importer.getCode() code += loader.getInputOutputFiles() loader.addLocalFunctionsCode() loader.addLoadDataCode() loader.addMainCode() code += loader.getCode() with open(deploy_path/"aionCode.py", "w") as f: f.write(code) generated_files.append("aionCode.py") with open(deploy_path/"requirements.txt", "w") as f: req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()]) f.write(req) generated_files.append("requirements.txt") config_file = deploy_path/"config.json" config_data = get_load_data_params(config) with open (config_file, "w") as f: json.dump(config_data, f, indent=4) generated_files.append("config.json") create_docker_file('load_data', deploy_path,config['modelName'],generated_files)
__init__.py
""" /** * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * © Copyright HCL Technologies Ltd. 2021, 2022 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. */ """ from .imports import importModule from .load_data import tabularDataReader from .transformer import transformer as profiler from .selector import selector from .trainer import learner from .deploy import deploy from .functions import global_function
__init__.py
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
baseline.py
import joblib import pandas as pd import sys import math import time import pandas as pd import numpy as np from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix from sklearn.svm import SVC from sklearn.linear_model import LinearRegression import argparse import json def mltesting(modelfile,datafile,features,target): model = joblib.load(modelfile) ProblemName = model.__class__.__name__ if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighborsClassifier','DecisionTreeClassifier','GradientBoostingClassifier','XGBClassifier','LGBMClassifier','CatBoostClassifier']: Problemtype = 'Classification' elif ProblemName in ['LinearRegression','Lasso','Ridge','DecisionTreeRegressor','RandomForestRegressor','GradientBoostingRegressor','XGBRegressor','LGBMRegressor','CatBoostRegressor']: Problemtype = 'Regression' else: Problemtype = 'Unknown' if Problemtype == 'Classification': Params = model.get_params() try: df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) if ProblemName == 'LogisticRegression' or ProblemName == 'DecisionTreeClassifier' or ProblemName == 'RandomForestClassifier' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsClassifier' or ProblemName == 'GradientBoostingClassifier' or ProblemName == 'SVC': features = model.feature_names_in_ elif ProblemName == 'XGBClassifier': features = model.get_booster().feature_names elif ProblemName == 'LGBMClassifier': features = model.feature_name_ elif ProblemName == 'CatBoostClassifier': features = model.feature_names_ modelfeatures = features dfp = df[modelfeatures] tar = target target = df[tar] predic = model.predict(dfp) output = {} matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() classificationreport = round(classificationreport,2) classificationreport = classificationreport.to_json(orient='index') output["Precision"] = "%.2f" % precision_score(target, predic,average='weighted') output["Recall"] = "%.2f" % recall_score(target, predic,average='weighted') output["Accuracy"] = "%.2f" % accuracy_score(target, predic) output["ProblemName"] = ProblemName output["Status"] = "Success" output["Params"] = Params output["Problemtype"] = Problemtype output["Confusionmatrix"] = matrixconfusion output["classificationreport"] = classificationreport # import statistics # timearray = [] # for i in range(0,5): # start = time.time() # predic1 = model.predict(dfp.head(1)) # end = time.time() # timetaken = (round((end - start) * 1000,2),'Seconds') # timearray.append(timetaken) # print(timearray) start = time.time() for i in range(0,5): predic1 = model.predict(dfp.head(1)) end = time.time() timetaken = (round((end - start) * 1000,2),'Seconds') # print(timetaken) start1 = time.time() for i in range(0,5): predic2 = model.predict(dfp.head(10)) end1 = time.time() timetaken1 = (round((end1 - start1) * 1000,2) ,'Seconds') # print(timetaken1) start2 = time.time() for i in range(0,5): predic3 = model.predict(dfp.head(100)) end2 = time.time() timetaken2 = (round((end2 - start2) * 
1000,2) ,'Seconds') # print(timetaken2) output["onerecord"] = timetaken output["tenrecords"] = timetaken1 output["hundrecords"] = timetaken2 print(json.dumps(output)) except Exception as e: output = {} output['Problemtype']='Classification' output['Status']= "Fail" output["ProblemName"] = ProblemName output["Msg"] = 'Detected Model : {} \\n Problem Type : Classification \\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\n', '\\n')) print(output["Msg"]) print(json.dumps(output)) elif Problemtype == 'Regression': Params = model.get_params() try: df = pd.read_csv(datafile,encoding='utf-8',skipinitialspace = True) if ProblemName == 'LinearRegression' or ProblemName == 'Lasso' or ProblemName == 'Ridge' or ProblemName == 'DecisionTreeRegressor' or ProblemName == 'RandomForestRegressor' or ProblemName == 'GaussianNB' or ProblemName == 'KNeighborsRegressor' or ProblemName == 'GradientBoostingRegressor': features = model.feature_names_in_ elif ProblemName == 'XGBRegressor': features = model.get_booster().feature_names elif ProblemName == 'LGBMRegressor': features = model.feature_name_ elif ProblemName == 'CatBoostRegressor': features = model.feature_names_ modelfeatures = features dfp = df[modelfeatures] tar = target target = df[tar] predict = model.predict(dfp) mse = mean_squared_error(target, predict) mae = mean_absolute_error(target, predict) rmse = math.sqrt(mse) r2 = r2_score(target,predict,multioutput='variance_weighted') output = {} output["MSE"] = "%.2f" % mean_squared_error(target, predict) output["MAE"] = "%.2f" % mean_absolute_error(target, predict) output["RMSE"] = "%.2f" % math.sqrt(mse) output["R2"] = "%.2f" %r2_score(target,predict,multioutput='variance_weighted') output["ProblemName"] = ProblemName output["Problemtype"] = Problemtype output["Params"] = Params output['Status']='Success' start = time.time() predic1 = model.predict(dfp.head(1)) end = time.time() timetaken = (round((end - start) * 1000,2) ,'Seconds') # print(timetaken) start1 = time.time() predic2 = model.predict(dfp.head(10)) end1 = time.time() timetaken1 = (round((end1 - start1) * 1000,2),'Seconds') # print(timetaken1) start2 = time.time() predic3 = model.predict(dfp.head(100)) end2 = time.time() timetaken2 = (round((end2 - start2) * 1000,2) ,'Seconds') # print(timetaken2) output["onerecord"] = timetaken output["tenrecords"] = timetaken1 output["hundrecords"] = timetaken2 print(json.dumps(output)) except Exception as e: output = {} output['Problemtype']='Regression' output['Status']='Fail' output["ProblemName"] = ProblemName output["Msg"] = 'Detected Model : {} \\n Problem Type : Regression \\n Error : {}'.format(ProblemName, str(e).replace('"','//"').replace('\n', '\\n')) print(json.dumps(output)) else: output = {} output['Problemtype']='Unknown' output['Status']='Fail' output['Params'] = '' output["ProblemName"] = ProblemName output["Msg"] = 'Detected Model : {} \\n Error : {}'.format(ProblemName, 'Model not supported') print(json.dumps(output)) return(json.dumps(output)) def baseline_testing(modelFile,csvFile,features,target): features = [x.strip() for x in features.split(',')] return mltesting(modelFile,csvFile,features,target)
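A hedged usage sketch for baseline_testing(); the file paths and column names below are placeholders. Note that the timing figures in the output are computed as (end - start) * 1000, i.e. milliseconds, even though the tuples are labelled 'Seconds'.

# Hedged sketch: paths and column names are placeholders.
from baseline import baseline_testing   # assumes this file is importable as baseline.py

result_json = baseline_testing(
    modelFile='model.pkl',               # a joblib-dumped sklearn model
    csvFile='test_data.csv',             # CSV containing the model's features and the target
    features='f1, f2, f3',               # comma-separated string; split and stripped internally
    target='label',
)
print(result_json)                       # JSON with metrics, model params and batch timings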
uq_interface.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' #from sklearn.externals import joblib import joblib # import pyreadstat # import sys # import math import time import pandas as pd import numpy as np from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix from sklearn.svm import SVC from sklearn.linear_model import LinearRegression import argparse import json import os import pathlib from tensorflow.keras.models import load_model # from tensorflow.keras import backend as K import tensorflow as tf # from sklearn.decomposition import LatentDirichletAllocation from pathlib import Path #from aionUQ import aionUQ from uq_main import aionUQ import os from datetime import datetime from sklearn.model_selection import train_test_split parser = argparse.ArgumentParser() parser.add_argument('savFile') parser.add_argument('csvFile') parser.add_argument('features') parser.add_argument('target') args = parser.parse_args() from appbe.dataPath import DEPLOY_LOCATION if ',' in args.features: args.features = [x.strip() for x in args.features.split(',')] else: args.features = args.features.split(",") models = args.savFile if Path(models).is_file(): # if Path(args.savFile.is_file()): model = joblib.load(args.savFile) # print(model.__class__.__name__) # print('class:',model.__class__) # print(type(model).__name__) # try: # print('Classess=',model.classes_) # except: # print("Classess=N/A") # print('params:',model.get_params()) # try: # print('fea_imp =',model.feature_importances_) # except: # print("fea_imp =N/A") ProblemName = model.__class__.__name__ Params = model.get_params() # print("ProblemName: \n",ProblemName) # print("Params: \n",Params) # print('ProblemName:',model.__doc__) # print(type(ProblemName)) if ProblemName in ['LogisticRegression','SGDClassifier','SVC','DecissionTreeClassifier','RandomForestClassifier','GaussianNB','KNeighboursClassifier','DecisionTreeClassifier','GradientBoostingClassifier']: Problemtype = 'Classification' else : Problemtype = 'Regression' if Problemtype == 'Classification': df = pd.read_csv(args.csvFile) object_cols = [col for col, col_type in df.dtypes.items() if col_type == 'object'] df = df.drop(object_cols, axis=1) df = df.dropna(axis=1) df = df.reset_index(drop=True) modelfeatures = args.features # dfp = df[modelfeatures] tar = args.target # target = df[tar] y=df[tar] X = df.drop(tar, axis=1) #for dummy test,train values pass X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) uqObj=aionUQ(df,X,y,ProblemName,Params,model,modelfeatures,tar) #accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification(X_train, X_test, y_train, y_test,"uqtest") 
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per=uqObj.uqMain_BBMClassification() # print("UQ Classification: \n",output_jsonobject) print(accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertaitny_per) print("End of UQ Classification.\n") else: df = pd.read_csv(args.csvFile) modelfeatures = args.features # print("modelfeatures: \n",modelfeatures) # print("type modelfeatures: \n",type(modelfeatures)) dfp = df[modelfeatures] tar = args.target target = df[tar] #Not used, just dummy X,y split y=df[tar] X = df.drop(tar, axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) uqObj=aionUQ(df,dfp,target,ProblemName,Params,model,modelfeatures,tar) total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression() print(total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject) print("End of UQ reg\n") elif Path(models).is_dir(): os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices' os.environ['TF_CPP_MIN_LOG_LEVEL']='2' model = load_model(models) ProblemName = model.__class__.__name__ Problemtype = 'Classification' # print('class:',model.__class__) # print('class1',model.__class__.__name__) # print(model.summary()) # print('ProblemName1:',model.get_config()) def Params(model: tf.keras.Model): Params = [] model.Params(print_fn=lambda x: Params.append(x)) return '\n'.join(Params) df = pd.read_csv(args.csvFile) modelfeatures = args.features dfp = df[modelfeatures] tar = args.target target = df[tar] df3 = dfp.astype(np.float32) predic = model.predict(df3) if predic.shape[-1] > 1: predic = np.argmax(predic, axis=-1) else: predic = (predic > 0.5).astype("int32") matrixconfusion = pd.DataFrame(confusion_matrix(predic,target)) matrixconfusion = matrixconfusion.to_json(orient='index') classificationreport = pd.DataFrame(classification_report(target,predic,output_dict=True)).transpose() classificationreport = round(classificationreport,2) classificationreport = classificationreport.to_json(orient='index') output = {} output["Precision"] = "%.3f" % precision_score(target, predic,average='weighted') output["Recall"] = "%.3f" % recall_score(target, predic,average='weighted') output["Accuracy"] = "%.3f" % accuracy_score(target, predic) output["ProblemName"] = ProblemName output["Params"] = Params output["Problemtype"] = Problemtype output["Confusionmatrix"] = matrixconfusion output["classificationreport"] = classificationreport print(json.dumps(output))
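Because this module parses its positional arguments (savFile, csvFile, features, target) at import time, it is meant to be run as a script rather than imported. A hedged invocation sketch, with placeholder file names, is shown below.

# Hedged sketch: invokes uq_interface.py as a script; all paths are placeholders.
import subprocess

subprocess.run([
    'python', 'uq_interface.py',
    'model.pkl',        # savFile: a joblib model file, or a directory for a Keras model
    'test_data.csv',    # csvFile
    'f1,f2,f3',         # features (comma-separated)
    'label',            # target
], check=True)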
aionUQ.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging logging.getLogger('tensorflow').disabled = True import json #from nltk.corpus import stopwords from collections import Counter from matplotlib import pyplot import sys import os import json import matplotlib.pyplot as plt from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression from uq360.algorithms.ucc_recalibration import UCCRecalibration from sklearn import datasets from sklearn.model_selection import train_test_split import pandas as pd from uq360.metrics.regression_metrics import compute_regression_metrics import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import roc_curve # from math import sqrt from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error # from uq360.metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, plot_uncertainty_by_feature, plot_picp_by_feature from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature #Added libs from MLTest import sys import time from sklearn.metrics import confusion_matrix from pathlib import Path import logging # import json class aionUQ: # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature,deployLocation): # #printprint("Inside aionUQ \n") try: #print("Inside aionUQ init\n ") self.data=df self.dfFeatures=dfp self.uqconfig_base=Params self.uqconfig_meta=Params self.targetFeature=targetfeature self.target=target self.selectedfeature=modelfeatures self.y=self.target self.X=self.dfFeatures self.log = logging.getLogger('eion') self.basemodel=model self.model_name=ProblemName self.Deployment = os.path.join(deployLocation,'log','UQ') os.makedirs(self.Deployment,exist_ok=True) self.uqgraphlocation = os.path.join(self.Deployment,'UQgraph') os.makedirs(self.uqgraphlocation,exist_ok=True) except Exception as e: self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def totalUncertainty(self,df,basemodel,model_params,xtrain, xtest, ytrain, ytest,aionstatus): from sklearn.model_selection import train_test_split # To get each class values and uncertainty if (aionstatus.lower() == 'aionuq'): X_train, X_test, y_train, y_test = xtrain, xtest, ytrain, ytest # y_val = y_train.append(y_test) else: # y_val = self.y df=self.data y=df[self.targetFeature] X = df.drop(self.targetFeature, axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) key = 'criterion' #if key in model_params: try: #if model_params.has_key(key): if key in model_params: if (model_params['criterion']): 
uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # from sklearn.tree import DecisionTreeRegressor # from sklearn.linear_model import LinearRegression,Lasso,Ridge # from sklearn import linear_model # from sklearn.ensemble import RandomForestRegressor if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: uq_scoring_param=uq_scoring_param else: uq_scoring_param='picp' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) y_hat_total_mean=np.mean(y_hat) y_hat_lb_total_mean=np.mean(y_hat_lb) y_hat_ub_total_mean=np.mean(y_hat_ub) mpiw_20_per=(y_hat_total_mean*20/100) mpiw_lower_range = y_hat_total_mean - mpiw_20_per mpiw_upper_range = y_hat_total_mean + mpiw_20_per from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) self.log.info('Model total picp_percentage : '+str(picp_percentage)) return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): try: global x_feature,y_feature if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): x_feature=''.join(map(str, self.selectedfeature)) else: x_feature= str(self.selectedfeature) # self.selectedfeature=str(self.selectedfeature) X_test=np.squeeze(X_test) y_feature=str(self.targetFeature) pred_dict = {x_feature: X_test, 'y': y_test, 'y_mean': y_mean, 'y_upper': y_upper, 'y_lower': y_lower } pred_df = pd.DataFrame(data=pred_dict) pred_df_sorted = pred_df.sort_values(by=x_feature) plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y'], 'o', label='Observed') plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') plt.plot(pred_df_sorted[x_feature], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') plt.legend() plt.xlabel(x_feature) plt.ylabel(y_feature) plt.title('UQ Confidence Interval Plot.') # plt.savefig('uq_test_plt.png') if os.path.exists(str(self.uqgraphlocation)+'/uq_test_plt.png'): os.remove(str(self.uqgraphlocation)+'/uq_test_plt.png') plt.savefig(str(self.Deployment)+'/uq_test_plt.png') plt.savefig(str(self.uqgraphlocation)+'/uq_test_plt.png') plt.clf() plt.cla() plt.close() pltreg=plot_picp_by_feature(X_test, y_test, y_lower, y_upper, xlabel=x_feature) #pltreg.savefig('x.png') pltr=pltreg.figure if os.path.exists(str(self.uqgraphlocation)+'/picp_per_feature.png'): os.remove(str(self.uqgraphlocation)+'/picp_per_feature.png') pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') 
pltr.savefig(str(self.uqgraphlocation)+'/picp_per_feature.png') plt.clf() plt.cla() plt.close() except Exception as e: # #print("display exception: \n",e) self.log.info('<!------------- UQ model Display Error ---------------> '+str(e)) def classUncertainty(self,pred,score): try: outuq = {} classes = np.unique(pred) for c in classes: ids = pred == c class_score = score[ids] predc = 'Class_'+str(c) outuq[predc]=np.mean(class_score) x = np.mean(class_score) #Uncertaininty in percentage x=x*100 self.log.info('----------------> Class '+str(c)+' Confidence Score '+str(round(x))) return outuq except Exception as e: # #print("display exception: \n",e) self.log.info('<!------------- UQ classUncertainty Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def uqMain_BBMClassification(self,x_train, x_test, y_train, y_test,aionstatus): try: # print("Inside uqMain_BBMClassification\n") # print("lenth of x_train {}, x_test {}, y_train {}, y_test {}".format(x_train, x_test, y_train, y_test)) aionstatus = str(aionstatus) if (aionstatus.lower() == 'aionuq'): X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test else: X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.linear_model import SGDClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from xgboost import XGBClassifier from lightgbm import LGBMClassifier from sklearn.neighbors import KNeighborsClassifier base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ #print(model_name) try: #geting used features model_used_features=self.basemodel.feature_names_in_ self.log.info("Base model used training features are (UQ Testing): \n"+str(model_used_features)) except: pass model_params=self.basemodel.get_params() uq_scoring_param='accuracy' basemodel=None if (model_name == "GradientBoostingClassifier"): basemodel=GradientBoostingClassifier elif (model_name == "SGDClassifier"): basemodel=SGDClassifier elif (model_name == "GaussianNB"): basemodel=GaussianNB elif (model_name == "DecisionTreeClassifier"): basemodel=DecisionTreeClassifier elif(model_name == "RandomForestClassifier"): basemodel=RandomForestClassifier elif (model_name == "SVC"): basemodel=SVC elif(model_name == "KNeighborsClassifier"): basemodel=KNeighborsClassifier elif(model_name.lower() == "logisticregression"): basemodel=LogisticRegression elif(model_name == "XGBClassifier"): basemodel=XGBClassifier elif(model_name == "LGBMClassifier"): basemodel=LGBMClassifier else: basemodel=LogisticRegression calibrated_mdl=None if (model_name == "SVC"): from sklearn.calibration import CalibratedClassifierCV basemodel=SVC(**model_params) calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_mdl.fit(X_train, y_train) basepredict = calibrated_mdl.predict(X_test) 
predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] elif (model_name == "SGDClassifier"): from sklearn.calibration import CalibratedClassifierCV basemodel=SGDClassifier(**model_params) calibrated_mdl = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_mdl.fit(X_train, y_train) basepredict = calibrated_mdl.predict(X_test) predprob_base = calibrated_mdl.predict_proba(X_test)[:, :] else: from sklearn.calibration import CalibratedClassifierCV base_mdl = basemodel(**model_params) calibrated_mdl = CalibratedClassifierCV(base_mdl,method='sigmoid',cv=3) basemodelfit = calibrated_mdl.fit(X_train, y_train) basepredict = calibrated_mdl.predict(X_test) predprob_base=calibrated_mdl.predict_proba(X_test)[:, :] cal_model_params=calibrated_mdl.get_params() acc_score_base=accuracy_score(y_test, basepredict) base_estimator_calibrate = cal_model_params['base_estimator'] uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) try: X_train=X_train[model_used_features] X_test=X_test[model_used_features] except: pass uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) # uqmodel_fit = uq_model.fit(X_train, y_train) y_t_pred, y_t_score = uq_model.predict(X_test) acc_score=accuracy_score(y_test, y_t_pred) test_accuracy_perc=round(100*acc_score) if(aionstatus == "aionuq"): test_accuracy_perc=round(test_accuracy_perc,2) #uq_aurrrc not used for any aion gui configuration, so it initialized as 0. if we use area_under_risk_rejection_rate_curve(), it shows plot in cmd prompt,so code execution interuupted.so we make it 0. uq_aurrrc=0 pass else: bbm_c_plot = plot_risk_vs_rejection_rate( y_true=y_test, y_prob=predprob_base, selection_scores=y_t_score, y_pred=y_t_pred, plot_label=['UQ_risk_vs_rejection'], risk_func=accuracy_score, num_bins = 10 ) # This done by kiran, need to uncomment for GUI integration. # bbm_c_plot_sub = bbm_c_plot[4] bbm_c_plot_sub = bbm_c_plot if os.path.exists(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png'): os.remove(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') # bbm_c_plot_sub.savefig(str(self.uqgraphlocation)+'/plot_risk_vs_rejection_rate.png') re_plot=plot_reliability_diagram(y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, plot_label=['UQModel reliability_diagram'], num_bins=10 ) # This done by kiran, need to uncomment for GUI integration. 
# re_plot_sub = re_plot[4] re_plot_sub = re_plot if os.path.exists(str(self.uqgraphlocation)+'/plot_reliability_diagram.png'): os.remove(str(self.uqgraphlocation)+'/plot_reliability_diagram.png') # re_plot_sub.savefig(str(DEFAULT_FILE_PATH)+'/plot_reliability_diagram.png') uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, selection_scores=y_t_score, attributes=None, risk_func=accuracy_score,subgroup_ids=None, return_counts=False, num_bins=10) uq_aurrrc=uq_aurrrc test_accuracy_perc=round(test_accuracy_perc) #metric_all=compute_classification_metrics(y_test, y_prob, option='all') metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') #expected_calibration_error uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=basepredict, num_bins=10, return_counts=False) # uq_aurrrc=uq_aurrrc confidence_score=acc_score_base-uq_ece ece_confidence_score=round(confidence_score,2) # Model uncertainty using ECE score # model_uncertainty_ece = 1-ece_confidence_score #Uncertainty Using model inherent predict probability mean_predprob_total=np.mean(y_t_score) model_confidence=mean_predprob_total model_uncertainty = 1-mean_predprob_total model_confidence = round(model_confidence,2) # To get each class values and uncertainty if (aionstatus.lower() == 'aionuq'): y_val = np.append(y_train,y_test) else: y_val = self.y self.log.info('------------------> Model Confidence Score '+str(model_confidence)) outuq = self.classUncertainty(y_t_pred,y_t_score) # Another way to get conf score model_uncertainty_per=round((model_uncertainty*100),2) model_confidence_per=round((model_confidence*100),2) acc_score_per = round((acc_score*100),2) uq_ece_per=round((uq_ece*100),2) output={} recommendation = "" if (uq_ece > 0.5): # RED text recommendation = 'Model has high ece (expected calibration error) score compare to threshold (0.5),not good to be deploy. need to be add more input data across all feature ranges to train base model, also try with different classification algorithms/ensembling to reduce ECE (ECE~0).' else: # self.log.info('Model has good ECE score and accuracy, ready to deploy.\n.') if (uq_ece <= 0.1 and model_confidence >= 0.9): # Green Text recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' else: # Orange recommendation = 'Model has good ECE score (between 0.1-0.5), but less confidence score compare to threshold (90%). If user wants,model can be improve by adding more input data across all feature ranges and could be evaluate with different algorithms/ensembling. 
' #Adding each class uncertainty value classoutput = {} for k,v in outuq.items(): classoutput[k]=(str(round((v*100),2))) output['classes'] = classoutput output['ModelConfidenceScore']=(str(model_confidence_per)) output['ExpectedCalibrationError']=str(uq_ece_per) output['ModelUncertainty']=str(model_uncertainty_per) output['Recommendation']=recommendation # output['user_msg']='Please check the plot for more understanding of model uncertainty' #output['UQ_area_under_risk_rejection_rate_curve']=round(uq_aurrrc,4) output['Accuracy']=str(acc_score_per) output['Problem']= 'Classification' #self.log.info('Model Accuracy score in percentage : '+str(test_accuracy_perc)+str(' %')) # #print("Prediction mean for the given model:",np.mean(y_hat),"\n") #self.log.info(recommendation) #self.log.info("Model_confidence_score: " +str(confidence_score)) #self.log.info("Model_uncertainty: " +str(round(model_uncertainty,2))) #self.log.info('Please check the plot for more understanding of model uncertainty.\n.') uq_jsonobject = json.dumps(output) with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f: json.dump(output, f) return test_accuracy_perc,uq_ece,output,model_confidence_per,model_uncertainty_per except Exception as inst: self.log.info('\n < ---------- UQ Model Execution Failed Start--------->') self.log.info('\n<------Model Execution failed!!!.' + str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno)) self.log.info('\n < ---------- Model Execution Failed End --------->') def aion_confidence_plot(self,df): df=df df = df.sort_values(by=self.selectedfeature) best_values=df.Best_values.to_list() best_upper=df.Best__upper.to_list() best_lower=df.Best__lower.to_list() Total_Upper_PI=df.Total_Upper_PI.to_list() Total_Low_PI=df.Total_Low_PI.to_list() Obseved = df.Observed.to_list() plt.plot(df[x_feature], df['Observed'], 'o', label='Observed') plt.plot(df[x_feature], df['Best__upper'],'r--', lw=2, color='grey') plt.plot(df[x_feature], df['Best__lower'],'r--', lw=2, color='grey') plt.plot(df[x_feature], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') plt.fill_between(df[x_feature], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) plt.fill_between(df[x_feature],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) plt.legend() plt.xlabel(self.selectedfeature) plt.ylabel(self.targetFeature) plt.title('UQ Best & Good Area Plot') if os.path.exists(str(self.uqgraphlocation)+'/uq_confidence_plt.png'): os.remove(str(self.uqgraphlocation)+'/uq_confidence_plt.png') plt.savefig(str(self.uqgraphlocation)+'/uq_confidence_plt.png') plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') def uqMain_BBMRegression(self,x_train, x_test, y_train, y_test,aionstatus): aionstatus = str(aionstatus) # if (aionstatus.lower() == 'aionuq'): # X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) # else: # X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) # modelName = "" self.log.info('<!------------- Inside BlackBox MetaModel Regression process. 
---------------> ') try: from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression import pandas as pd base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ model_params=self.basemodel.get_params() # #print("model_params['criterion']: \n",model_params['criterion']) key = 'criterion' #if key in model_params: try: #if model_params.has_key(key): if key in model_params: if (model_params['criterion']): uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # modelname='sklearn.linear_model'+'.'+model_name # X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest #Geeting trained model name and to use the model in BlackboxMetamodelRegression from sklearn.tree import DecisionTreeRegressor from sklearn.linear_model import LinearRegression,Lasso,Ridge from sklearn.ensemble import RandomForestRegressor if (model_name == "DecisionTreeRegressor"): basemodel=DecisionTreeRegressor elif (model_name == "LinearRegression"): basemodel=LinearRegression elif (model_name == "Lasso"): basemodel=Lasso elif (model_name == "Ridge"): basemodel=Ridge elif(model_name == "RandomForestRegressor"): basemodel=RandomForestRegressor else: basemodel=LinearRegression if (aionstatus.lower() == 'aionuq'): X_train, X_test, y_train, y_test = x_train, x_test, y_train, y_test total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) else: X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.3, random_state=0) total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,None, None, None, None,aionstatus) if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: uq_scoring_param=uq_scoring_param else: uq_scoring_param='picp' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) # #print("X_train.shape: \n",X_train.shape) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) self.log.info('<!------------- observed_picp: ---------------> '+str(observed_alphas_picp)) self.log.info('<!------------- observed_widths_mpiw: ---------------> '+str(observed_widths_mpiw)) # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” #metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option='all',nll_fn=None) #nll - Gaussian negative log likelihood loss. 
metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) metric_used='' for k,v in metric_all.items(): metric_used=str(round(v,2)) self.log.info('<!------------- Metric used for regression UQ: ---------------> '+str(metric_all)) # Determine the confidence level and recommentation to the tester # test_data=y_test observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) #Calculate total uncertainty for all features # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) # df1=self.data total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params,x_train, x_test, y_train, y_test,aionstatus) recommendation="" output={} if (observed_alphas_picp >= 0.95 and total_picp >= 0.75): # Add GREEN text self.log.info('Model has good confidence for the selected feature, ready to deploy.\n.') recommendation = "Model has good confidence score, ready to deploy." elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.95) and (total_picp >= 0.50)): # Orange recommendation = "Model has average confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling." self.log.info('Model has average confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') else: # RED text recommendation = "Model has less confidence compare to threshold (95%), need to be add more input data across all feature ranges to train base model, also try with different regression algorithms/ensembling." 
self.log.info('Model has less confidence score compare to threshold, need to be add more input data for training base model and again try with UQ .') #Build uq json info dict output['ModelConfidenceScore']=(str(total_picp_percentage)+'%') output['ModelUncertainty']=(str(total_Uncertainty_percentage)+'%') output['SelectedFeatureConfidence']=(str(picp_percentage)+'%') output['SelectedFeatureUncertainty']=(str(Uncertainty_percentage)+'%') output['PredictionIntervalCoverageProbability']=observed_alphas_picp output['MeanPredictionIntervalWidth']=round(observed_widths_mpiw) output['DesirableMPIWRange: ']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) output['Recommendation']=str(recommendation) output['Metric']=uq_scoring_param output['Score']=metric_used output['Problemtype']= 'Regression' self.log.info('Model confidence in percentage is: '+str(picp_percentage)+str(' %')) self.log.info('Model Uncertainty is:: '+str(Uncertainty_percentage)+str(' %')) #self.log.info('Please check the plot for more understanding of model uncertainty.\n.') #self.display_results(X_test, y_test, y_mean=y_hat, y_lower=y_hat_lb, y_upper=y_hat_ub) uq_jsonobject = json.dumps(output) with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f: json.dump(output, f) #To get best and medium UQ range of values from total predict interval y_hat_m=y_hat.tolist() y_hat_lb=y_hat_lb.tolist() upper_bound=y_hat_ub.tolist() y_hat_ub=y_hat_ub.tolist() for x in y_hat_lb: y_hat_ub.append(x) total_pi=y_hat_ub medium_UQ_range = y_hat_ub best_UQ_range= y_hat.tolist() ymean_upper=[] ymean_lower=[] y_hat_m=y_hat.tolist() for i in y_hat_m: y_hat_m_range= (i*20/100) x=i+y_hat_m_range y=i-y_hat_m_range ymean_upper.append(x) ymean_lower.append(y) min_best_uq_dist=round(min(best_UQ_range)) max_best_uq_dist=round(max(best_UQ_range)) # initializing ranges list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) list_best = y_hat_m X_test = np.squeeze(X_test) ''' uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, 'Best__upper':ymean_upper, 'Best__lower':ymean_lower, 'Total_Low_PI': y_hat_lb, 'Total_Upper_PI': upper_bound, } print(uq_dict) uq_pred_df = pd.DataFrame(data=uq_dict) uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False) csv_path=str(self.Deployment)+"/uq_pred_df.csv" df=pd.read_csv(csv_path) self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\n.') #Callconfidence olot fn only for UQTest interface if (aionstatus.lower() == 'aionuq'): #No need to showcase confidence plot for aion main pass else: self.aion_confidence_plot(df) ''' return total_picp_percentage,total_Uncertainty_percentage,list_medium,list_best,metric_all,json.loads(uq_jsonobject) except Exception as inst: exc = {"status":"FAIL","message":str(inst).strip('"')} out_exc = json.dumps(exc) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
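For reference, a minimal self-contained sketch of the UQ360 metamodel regression flow used by uqMain_BBMRegression above: the same BlackboxMetamodelRegression fit/predict calls and the same picp/mpiw interval metrics, run on synthetic data with a LinearRegression base model. The synthetic data, the base model choice and the reuse of get_params() for both base_config and meta_config are illustrative assumptions for this sketch, not the values AION passes at runtime.

# Illustrative sketch only (not AION runtime code): BlackboxMetamodelRegression
# fit/predict plus PICP/MPIW scoring on synthetic data. Assumes uq360 and
# scikit-learn are installed; mirrors the calls made in the file above.
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression
from uq360.metrics import picp, mpiw

X, y = make_regression(n_samples=500, n_features=1, noise=10.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

basemodel = LinearRegression                 # class, not instance, as in the code above
model_params = basemodel().get_params()      # reused for base and meta config, as above
uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel,
                                       base_config=model_params, meta_config=model_params)
uq_model.fit(X_train, y_train)               # fits both the base and the meta model

y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test)   # mean, lower bound, upper bound

# Interval quality: coverage probability (PICP) and mean interval width (MPIW).
print('PICP:', round(picp(y_test, y_hat_lb, y_hat_ub), 2))
print('MPIW:', round(mpiw(y_hat_lb, y_hat_ub), 2))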
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
uq_main.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import logging logging.getLogger('tensorflow').disabled = True import json #from nltk.corpus import stopwords from collections import Counter from matplotlib import pyplot import sys import os import matplotlib.pyplot as plt from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression from sklearn import datasets from sklearn.model_selection import train_test_split import pandas as pd from uq360.metrics.regression_metrics import compute_regression_metrics import numpy as np from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn.metrics import roc_curve from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score,mean_absolute_error from uq360.metrics import plot_uncertainty_by_feature, plot_picp_by_feature import sys import time from sklearn.metrics import confusion_matrix from pathlib import Path import logging import logging.config from os.path import expanduser import platform from sklearn.utils import shuffle class aionUQ: # def __init__(self,uqdf,targetFeature,xtrain,ytrain,xtest,ytest,uqconfig_base,uqconfig_meta,deployLocation,saved_model): def __init__(self,df,dfp,target,ProblemName,Params,model,modelfeatures,targetfeature): try: self.data=df self.dfFeatures=dfp self.uqconfig_base=Params self.uqconfig_meta=Params self.targetFeature=targetfeature self.log = logging.getLogger('aionUQ') self.target=target self.selectedfeature=modelfeatures self.y=self.target self.X=self.dfFeatures from appbe.dataPath import DEPLOY_LOCATION self.Deployment = os.path.join(DEPLOY_LOCATION,('UQTEST_'+str(int(time.time())))) os.makedirs(self.Deployment,exist_ok=True) self.basemodel=model self.model_name=ProblemName # self.X, self.y = shuffle(self.X, self.y) X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=0) self.xtrain = X_train self.xtest = X_test self.ytrain = y_train self.ytest = y_test # self.deployLocation=deployLocation except Exception as e: # self.log.info('<!------------- UQ model INIT Error ---------------> '+str(e)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) # self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) def totalUncertainty(self,df,basemodel,model_params): try: # from sklearn.model_selection import train_test_split # df=self.data # y=df[self.targetFeature] # X = df.drop(self.targetFeature, axis=1) if (isinstance(self.selectedfeature,list)): selectedfeature=[self.selectedfeature[0]] selectedfeature=' '.join(map(str,selectedfeature)) if (isinstance(self.targetFeature,list)): targetFeature=[self.targetFeature[0]] targetFeature=' '.join(map(str,targetFeature)) X = self.data[selectedfeature] y = self.data[targetFeature] X = X.values.reshape((-1,1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) key = 'criterion' #if key in model_params: try: #if 
model_params.has_key(key): if key in model_params: if (model_params['criterion']): uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # from sklearn.tree import DecisionTreeRegressor # from sklearn.linear_model import LinearRegression,Lasso,Ridge # from sklearn import linear_model # from sklearn.ensemble import RandomForestRegressor if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: uq_scoring_param=uq_scoring_param else: uq_scoring_param='picp' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) y_hat_total_mean=np.mean(y_hat) y_hat_lb_total_mean=np.mean(y_hat_lb) y_hat_ub_total_mean=np.mean(y_hat_ub) mpiw_20_per=(y_hat_total_mean*20/100) mpiw_lower_range = y_hat_total_mean - mpiw_20_per mpiw_upper_range = y_hat_total_mean + mpiw_20_per from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) # self.log.info('Model total observed_widths_mpiw : '+str(observed_widths_mpiw)) # self.log.info('Model mpiw_lower_range : '+str(mpiw_lower_range)) # self.log.info('Model mpiw_upper_range : '+str(mpiw_upper_range)) # self.log.info('Model total picp_percentage : '+str(picp_percentage)) except Exception as e: print("totalUncertainty fn error: \n",e) return observed_alphas_picp,observed_widths_mpiw,picp_percentage,Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range def display_results(self,X_test, y_test, y_mean, y_lower, y_upper): try: global x_feature,y_feature if (isinstance(self.selectedfeature, list) or isinstance(self.selectedfeature, tuple)): x_feature=','.join(map(str, self.selectedfeature)) else: x_feature= str(self.selectedfeature) # self.selectedfeature=str(self.selectedfeature) X_test=np.squeeze(X_test) y_feature=str(self.targetFeature) pred_dict = {x_feature: X_test, 'y': y_test, 'y_mean': y_mean, 'y_upper': y_upper, 'y_lower': y_lower } pred_df = pd.DataFrame(data=pred_dict) x_feature1 = x_feature.split(',') pred_df_sorted = pred_df.sort_values(by=x_feature1) plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y'], 'o', label='Observed') plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_mean'], '-', lw=2, label='Predicted') plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_upper'], 'r--', lw=2, label='Upper Bound') plt.plot(pred_df_sorted[x_feature1[0]], pred_df_sorted['y_lower'], 'r--', lw=2, label='Lower Bound') plt.legend() plt.xlabel(x_feature1[0]) plt.ylabel(y_feature) plt.title('UQ Confidence Interval Plot.') # plt.savefig('uq_test_plt.png') ''' if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png'): os.remove(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') ''' plt.savefig(str(self.Deployment)+'/uq_test_plt.png') #plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_test_plt.png') confidencePlot = os.path.join(self.Deployment,'picp_per_feature.png') plt.clf() plt.cla() plt.close() pltreg=plot_picp_by_feature(X_test, y_test, y_lower, y_upper, xlabel=x_feature) 
#pltreg.savefig('x.png') pltr=pltreg.figure ''' if os.path.exists(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png'): os.remove(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') ''' pltr.savefig(str(self.Deployment)+'/picp_per_feature.png') picpPlot = os.path.join(self.Deployment,'picp_per_feature.png') #pltr.savefig(str(DEFAULT_FILE_PATH)+'/picp_per_feature.png') plt.clf() plt.cla() plt.close() except Exception as e: print("display exception: \n",e) # self.log.info('<!------------- UQ model Display Error ---------------> '+str(e)) return confidencePlot,picpPlot def classUncertainty(self,predprob_base): # from collections import Counter predc="Class_" classes = np.unique(self.y) total = len(self.y) list_predprob=[] counter = Counter(self.y) #for loop for test class purpose for k,v in counter.items(): n_samples = len(self.y[self.y==k]) per = ((v/total) * 100) prob_c=predprob_base[:,int(k)] list_predprob.append(prob_c) # #print("Class_{} : {}/{} percentage={}% \n".format(k,n_samples,total,per )) outuq={} for k in classes: predc += str(k) mean_predprob_class=np.mean(list_predprob[int(k)]) uncertainty=1-mean_predprob_class predc+='_Uncertainty' outuq[predc]=uncertainty predc="Class_" return outuq def uqMain_BBMClassification(self): # self.log.info('<!------------- Inside BlackBox MetaModel Classification process. ---------------> ') # import matplotlib.pyplot as plt try: from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelClassification except: ##In latest UQ360, library changed from BlackboxMetamodelClassification to MetamodelClassification. from uq360.algorithms.blackbox_metamodel import MetamodelClassification # from uq360.metrics.classification_metrics import area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics from uq360.metrics.classification_metrics import plot_reliability_diagram,area_under_risk_rejection_rate_curve,plot_risk_vs_rejection_rate,expected_calibration_error,compute_classification_metrics # from sklearn import datasets # from sklearn.model_selection import train_test_split # from sklearn.metrics import accuracy_score from sklearn.ensemble import GradientBoostingClassifier from sklearn.linear_model import LogisticRegression from sklearn.linear_model import SGDClassifier from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier # from sklearn.linear_model import LogisticRegression # import pandas as pd base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ model_params=self.basemodel.get_params() try: #geting used features model_used_features=self.basemodel.feature_names_in_ except: pass X_train, X_test, y_train, y_test = self.xtrain,self.xtest,self.ytrain,self.ytest uq_scoring_param='accuracy' basemodel=None if (model_name == "GradientBoostingClassifier"): basemodel=GradientBoostingClassifier elif (model_name == "SGDClassifier"): basemodel=SGDClassifier elif (model_name == "GaussianNB"): basemodel=GaussianNB elif (model_name == "DecisionTreeClassifier"): basemodel=DecisionTreeClassifier elif(model_name == "RandomForestClassifier"): basemodel=RandomForestClassifier elif (model_name == "SVC"): basemodel=SVC elif(model_name == "KNeighborsClassifier"): basemodel=KNeighborsClassifier elif(model_name == "LogisticRegression"): basemodel=LogisticRegression 
else: basemodel=LogisticRegression try: try: ##Removed meta_config because leave meta model config as default ml model params uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) except: uq_model = BlackboxMetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) except: ##In latest version BlackboxMetamodelClassification name modified as MetamodelClassification try: ##Removed meta_config because leave meta model config as default ml model params uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params) except: uq_model = MetamodelClassification(base_model=self.basemodel, meta_model=basemodel,base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model try: X_train=X_train[model_used_features] X_test=X_test[model_used_features] except: pass uqmodel_fit = uq_model.fit(X_train, y_train,base_is_prefitted=True,meta_train_data=(X_train, y_train)) # uqmodel_fit = uq_model.fit(X_train, y_train) #Test data pred, score y_t_pred, y_t_score = uq_model.predict(X_test) #predict probability # uq_pred_prob=uq_model.predict_proba(X_test) # predprob_base=basemodel.predict_proba(X_test)[:, :] #if (model_name == "SVC" or model_name == "SGDClassifier"): # if model_name in ['SVC','SGDClassifier']: if (model_name == "SVC"): from sklearn.calibration import CalibratedClassifierCV basemodel=SVC(**model_params) calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_svc.fit(X_train, y_train) basepredict = basemodel.predict(X_test) predprob_base = calibrated_svc.predict_proba(X_test)[:, :] elif (model_name == "SGDClassifier"): from sklearn.calibration import CalibratedClassifierCV basemodel=SGDClassifier(**model_params) calibrated_svc = CalibratedClassifierCV(basemodel,method='sigmoid',cv=3) calibrated_svc.fit(X_train, y_train) basepredict = basemodel.predict(X_test) predprob_base = calibrated_svc.predict_proba(X_test)[:, :] else: base_mdl = basemodel(**model_params) basemodelfit = base_mdl.fit(X_train, y_train) basepredict = base_mdl.predict(X_test) predprob_base=base_mdl.predict_proba(X_test)[:, :] acc_score=accuracy_score(y_test, y_t_pred) test_accuracy_perc=round(100*acc_score) ''' bbm_c_plot = plot_risk_vs_rejection_rate( y_true=y_test, y_prob=predprob_base, selection_scores=y_t_score, y_pred=y_t_pred, plot_label=['UQ_risk_vs_rejection'], risk_func=accuracy_score, num_bins = 10 ) # This done by kiran, need to uncomment for GUI integration. try: bbm_c_plot_sub = bbm_c_plot[4] bbm_c_plot.savefig(str(self.Deployment)+'/plot_risk_vs_rejection_rate.png') riskPlot = os.path.join(self.Deployment,'plot_risk_vs_rejection_rate.png') except Exception as e: print(e) pass riskPlot = '' ''' riskPlot = '' ''' try: re_plot=plot_reliability_diagram(y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, plot_label=['UQModel reliability_diagram'], num_bins=10) # This done by kiran, need to uncomment for GUI integration. 
re_plot_sub = re_plot[4] # re_plot_sub = re_plot re_plot_sub.savefig(str(self.Deployment)+'/plot_reliability_diagram.png') reliability_plot = os.path.join(self.Deployment,'plot_reliability_diagram.png') except Exception as e: print(e) pass reliability_plot = '' ''' reliability_plot = '' uq_aurrrc=area_under_risk_rejection_rate_curve( y_true=y_test, y_prob=predprob_base, y_pred=y_t_pred, selection_scores=y_t_score, attributes=None, risk_func=accuracy_score,subgroup_ids=None, return_counts=False, num_bins=10) uq_aurrrc=uq_aurrrc test_accuracy_perc=round(test_accuracy_perc) #metric_all=compute_classification_metrics(y_test, y_prob, option='all') metric_all=compute_classification_metrics(y_test, predprob_base, option='accuracy') #expected_calibration_error uq_ece=expected_calibration_error(y_test, y_prob=predprob_base,y_pred=y_t_pred, num_bins=10, return_counts=False) uq_aurrrc=uq_aurrrc confidence_score=acc_score-uq_ece ece_confidence_score=round(confidence_score,2) # Model uncertainty using ECE score # model_uncertainty_ece = 1-ece_confidence_score # #print("model_uncertainty1: \n",model_uncertainty_ece) #Uncertainty Using model inherent predict probability mean_predprob_total=np.mean(predprob_base) model_uncertainty = 1-mean_predprob_total model_confidence=mean_predprob_total model_confidence = round(model_confidence,2) # To get each class values and uncertainty outuq = self.classUncertainty(predprob_base) # Another way to get conf score model_uncertainty_per=round((model_uncertainty*100),2) # model_confidence_per=round((model_confidence*100),2) model_confidence_per=round((ece_confidence_score*100),2) acc_score_per = round((acc_score*100),2) uq_ece_per=round((uq_ece*100),2) output={} recommendation = "" if (uq_ece > 0.5): # RED text recommendation = 'Model has high ece (expected calibration error) score compare to threshold (50%),not good to deploy. Add more input data across all feature ranges to train base model, also try with different classification algorithms/ensembling to reduce ECE (ECE~0).' msg = 'Bad' else: # self.log.info('Model has good ECE score and accuracy, ready to deploy.\n.') if (uq_ece <= 0.1 and model_confidence >= 0.9): # Green Text recommendation = 'Model has best calibration score (near to 0) and good confidence score , ready to deploy. ' msg = 'Best' else: # Orange recommendation = 'Model has average confidence score (ideal is >90% confidence) and good ECE score (ideal is <10% error).Model can be improved by adding more training data across all feature ranges and re-training the model.' 
msg = 'Good' #Adding each class uncertainty value output['Problem']= 'Classification' output['recommend']= 'recommend' output['msg']= msg output['UQ_Area_Under_Risk_Rejection_Rate_Curve']=round(uq_aurrrc,4) output['Model_Total_Confidence']=(str(model_confidence_per)+str('%')) output['Expected_Calibration_Error']=(str(uq_ece_per)+str('%')) output['Model_Total_Uncertainty']=(str(model_uncertainty_per)+str('%')) # output['Risk Plot'] = str(riskPlot) # output['Reliability Plot'] = str(reliability_plot) for k,v in outuq.items(): output[k]=(str(round((v*100),2))+str(' %')) output['Recommendation']=recommendation # output['user_msg']='Please check the plot for more understanding of model uncertainty' output['Metric_Accuracy_Score']=(str(acc_score_per)+str(' %')) outputs = json.dumps(output) with open(str(self.Deployment)+"/uq_classification_log.json", "w") as f: json.dump(output, f) return test_accuracy_perc,uq_ece,outputs def aion_confidence_plot(self,df): try: global x_feature df=df df = df.sort_values(by=self.selectedfeature) best_values=df.Best_values.to_list() best_upper=df.Best__upper.to_list() best_lower=df.Best__lower.to_list() Total_Upper_PI=df.Total_Upper_PI.to_list() Total_Low_PI=df.Total_Low_PI.to_list() Obseved = df.Observed.to_list() x_feature1 = x_feature.split(',') plt.plot(df[x_feature1[0]], df['Observed'], 'o', label='Observed') plt.plot(df[x_feature1[0]], df['Best__upper'],'r--', lw=2, color='grey') plt.plot(df[x_feature1[0]], df['Best__lower'],'r--', lw=2, color='grey') plt.plot(df[x_feature1[0]], df['Best_values'], 'r--', lw=2, label='MeanPrediction',color='red') plt.fill_between(df[x_feature1[0]], Total_Upper_PI, Total_Low_PI, label='Good Confidence', color='lightblue', alpha=.5) plt.fill_between(df[x_feature1[0]],best_lower, best_upper,label='Best Confidence', color='orange', alpha=.5) plt.legend() plt.xlabel(x_feature1[0]) plt.ylabel(self.targetFeature) plt.title('UQ Best & Good Area Plot') ''' if os.path.exists(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png'): os.remove(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png') plt.savefig(str(DEFAULT_FILE_PATH)+'/uq_confidence_plt.png') ''' plt.savefig(str(self.Deployment)+'/uq_confidence_plt.png') uq_confidence_plt = os.path.join(str(self.Deployment),'uq_confidence_plt.png') except Exception as inst: print('-----------dsdas->',inst) uq_confidence_plt = '' return uq_confidence_plt def uqMain_BBMRegression(self): # modelName = "" # self.log.info('<!------------- Inside BlockBox MetaModel Regression process. 
---------------> ') try: from uq360.algorithms.blackbox_metamodel import BlackboxMetamodelRegression import pandas as pd base_modelname=__class__.__name__ base_config = self.uqconfig_base meta_config = self.uqconfig_base model_name=self.basemodel.__class__.__name__ model_params=self.basemodel.get_params() # #print("model_params['criterion']: \n",model_params['criterion']) key = 'criterion' #if key in model_params: try: #if model_params.has_key(key): if key in model_params: if (model_params['criterion']): uq_scoring_param=model_params.get('criterion') elif(model_params['criterion'] == None): uq_scoring_param='picp' else: uq_scoring_param='picp' else: uq_scoring_param='picp' pass except Exception as inst: uq_scoring_param='picp' # modelname='sklearn.linear_model'+'.'+model_name # self.xtrain = self.xtrain.values.reshape((-1,1)) # self.xtest = self.xtest.values.reshape((-1,1)) if (isinstance(self.selectedfeature,list)): selectedfeature=[self.selectedfeature[0]] selectedfeature=' '.join(map(str,selectedfeature)) if (isinstance(self.targetFeature,list)): targetFeature=[self.targetFeature[0]] targetFeature=' '.join(map(str,targetFeature)) X = self.data[selectedfeature] y = self.data[targetFeature] X = X.values.reshape((-1,1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) #Geeting trained model name and to use the model in BlackboxMetamodelRegression from sklearn.tree import DecisionTreeRegressor from sklearn.linear_model import LinearRegression,Lasso,Ridge from sklearn.ensemble import RandomForestRegressor if (model_name == "DecisionTreeRegressor"): basemodel=DecisionTreeRegressor elif (model_name == "LinearRegression"): basemodel=LinearRegression elif (model_name == "Lasso"): basemodel=Lasso elif (model_name == "Ridge"): basemodel=Ridge elif(model_name == "RandomForestRegressor"): basemodel=RandomForestRegressor else: basemodel=LinearRegression if uq_scoring_param in ['rmse', 'nll','auucc_gain','picp','mpiw','r2']: if (uq_scoring_param.lower() == 'picp'): uq_scoring_param='prediction interval coverage probability score (picp)' else: uq_scoring_param=uq_scoring_param else: uq_scoring_param='prediction interval coverage probability score (picp)' uq_model = BlackboxMetamodelRegression(base_model=basemodel, meta_model=basemodel, base_config=model_params, meta_config=model_params) # this will fit both the base and the meta model uqmodel_fit = uq_model.fit(X_train, y_train) y_hat, y_hat_lb, y_hat_ub = uq_model.predict(X_test) from uq360.metrics import picp, mpiw observed_alphas_picp = picp(y_test, y_hat_lb, y_hat_ub) observed_widths_mpiw = mpiw(y_hat_lb, y_hat_ub) picp_percentage= round(observed_alphas_picp*100) Uncertainty_percentage=round(100-picp_percentage) # UQ metamodel regression have metrics as follows, “rmse”, “nll”, “auucc_gain”, “picp”, “mpiw”, “r2” metric_all=compute_regression_metrics(y_test, y_hat,y_hat_lb, y_hat_ub,option=uq_scoring_param,nll_fn=None) metric_used='' for k,v in metric_all.items(): metric_used=str(round(v,2)) # Determine the confidence level and recommentation to the tester # test_data=y_test observed_alphas_picp=round(observed_alphas_picp,2) observed_widths_mpiw=round(observed_widths_mpiw,2) #Calculate total uncertainty for all features # total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage = self.totalUncertainty(self.data) # df1=self.data total_picp,total_mpiw,total_picp_percentage,total_Uncertainty_percentage,mpiw_lower_range,mpiw_upper_range = self.totalUncertainty(self.data,basemodel,model_params) 
recommendation="" observed_widths_mpiw = round((observed_widths_mpiw/1000000)*100) if observed_widths_mpiw > 100: observed_widths_mpiw = 100 output={} if (observed_alphas_picp >= 0.90 and total_picp >= 0.75): # GREEN text recommendation = "Model has good confidence and MPIW score, ready to deploy." msg='Good' elif ((observed_alphas_picp >= 0.50 and observed_alphas_picp <= 0.90) and (total_picp >= 0.50)): # Orange recommendation = " Model has average confidence compare to threshold (ideal is both model confidence and MPIW should be >90%) .Model can be improved by adding more training data across all feature ranges and re-training the model." msg = 'Average' else: # RED text recommendation = "Model has less confidence compare to threshold (ideal is both model confidence and MPIW should be >90%), need to be add more input data across all feature ranges and retrain base model, also try with different regression algorithms/ensembling." msg = 'Bad' #Build uq json info dict output['Model_total_confidence']=(str(total_picp_percentage)+'%') output['Model_total_Uncertainty']=(str(total_Uncertainty_percentage)+'%') output['Selected_feature_confidence']=(str(picp_percentage)+'%') output['Selected_feature_Uncertainty']=(str(Uncertainty_percentage)+'%') output['Prediction_Interval_Coverage_Probability']=observed_alphas_picp output['Mean_Prediction_Interval_Width']=str(observed_widths_mpiw)+'%' output['Desirable_MPIW_range']=(str(round(mpiw_lower_range))+str(' - ')+str(round(mpiw_upper_range))) output['Recommendation']=str(recommendation) output['Metric_used']=uq_scoring_param output['Metric_value']=metric_used output['Problem']= 'Regression' output['recommend']= 'recommend' output['msg'] = msg with open(str(self.Deployment)+"/uq_reg_log.json", "w") as f: json.dump(output, f) #To get best and medium UQ range of values from total predict interval y_hat_m=y_hat.tolist() y_hat_lb=y_hat_lb.tolist() upper_bound=y_hat_ub.tolist() y_hat_ub=y_hat_ub.tolist() for x in y_hat_lb: y_hat_ub.append(x) total_pi=y_hat_ub medium_UQ_range = y_hat_ub best_UQ_range= y_hat.tolist() ymean_upper=[] ymean_lower=[] y_hat_m=y_hat.tolist() for i in y_hat_m: y_hat_m_range= (i*20/100) x=i+y_hat_m_range y=i-y_hat_m_range ymean_upper.append(x) ymean_lower.append(y) min_best_uq_dist=round(min(best_UQ_range)) max_best_uq_dist=round(max(best_UQ_range)) # initializing ranges list_medium=list(filter(lambda x:not(min_best_uq_dist<=x<=max_best_uq_dist), total_pi)) list_best = y_hat_m ''' print(X_test) print(X_test) X_test = np.squeeze(X_test) print(x_feature) ''' uq_dict = pd.DataFrame(X_test) #print(uq_dict) uq_dict['Observed'] = y_test uq_dict['Best_values'] = y_hat_m uq_dict['Best__upper'] = ymean_upper uq_dict['Best__lower'] = ymean_lower uq_dict['Total_Low_PI'] = y_hat_lb uq_dict['Total_Upper_PI'] = upper_bound ''' uq_dict = {x_feature:X_test,'Observed':y_test,'Best_values': y_hat_m, 'Best__upper':ymean_upper, 'Best__lower':ymean_lower, 'Total_Low_PI': y_hat_lb, 'Total_Upper_PI': upper_bound, }''' uq_pred_df = pd.DataFrame(data=uq_dict) uq_pred_df_sorted = uq_pred_df.sort_values(by='Best_values') uq_pred_df_sorted.to_csv(str(self.Deployment)+"/uq_pred_df.csv",index = False) csv_path=str(self.Deployment)+"/uq_pred_df.csv" df=pd.read_csv(csv_path) # self.log.info('uqMain() returns: observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all.\n.') # confidenceplot = self.aion_confidence_plot(df) # output['Confidence Plot']= confidenceplot uq_jsonobject = json.dumps(output) print("UQ regression problem training 
completed...\n") return observed_alphas_picp,observed_widths_mpiw,list_medium,list_best,metric_all,uq_jsonobject except Exception as inst: print('-------',inst) exc = {"status":"FAIL","message":str(inst).strip('"')} out_exc = json.dumps(exc) return out_exc
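uqMain_BBMClassification above falls back to scikit-learn's CalibratedClassifierCV when the base model (SVC, SGDClassifier) does not expose usable predicted probabilities, and classUncertainty() then converts the per-class mean predicted probability into an uncertainty value. Below is a minimal sketch of that idea on toy data, assuming integer class labels 0..K-1 as the code above does; the dataset and the SVC choice are illustrative only.

# Minimal sketch (illustrative data/model) of the calibration + per-class
# uncertainty idea used above. Requires only scikit-learn and numpy.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score

X, y = make_classification(n_samples=600, n_classes=3, n_informative=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# SVC exposes no predict_proba by default, so wrap it in a sigmoid calibrator
# (same pattern as the SVC / SGDClassifier branches above).
calibrated = CalibratedClassifierCV(SVC(), method='sigmoid', cv=3)
calibrated.fit(X_train, y_train)
predprob = calibrated.predict_proba(X_test)          # shape (n_samples, n_classes)
print('accuracy:', round(accuracy_score(y_test, calibrated.predict(X_test)), 2))

# Per-class uncertainty, mirroring classUncertainty():
# uncertainty of class k = 1 - mean predicted probability assigned to class k.
for k in np.unique(y):
    mean_prob = np.mean(predprob[:, int(k)])
    print(f'Class_{k}_Uncertainty: {round((1 - mean_prob) * 100, 2)} %')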
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
associationrules.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import pandas as pd import numpy as np from mlxtend.frequent_patterns import apriori, association_rules from mlxtend.preprocessing import TransactionEncoder import matplotlib.pyplot as plt import json import logging import os,sys def hot_encode(x): if(int(x)<= 0): return 0 if(int(x)>= 1): return 1 class associationrules: def __init__(self,dataframe,association_rule_conf,modelparam,invoiceNoFeature,itemFeature): self.minSupport = modelparam['minSupport'] self.metric = modelparam['metric'] self.minThreshold = modelparam['minThreshold'] self.data = dataframe self.invoiceNoFeature = invoiceNoFeature self.itemFeature = itemFeature self.log = logging.getLogger('eion') def apply_associationRules(self,outputLocation): self.data= self.data[[self.itemFeature,self.invoiceNoFeature]] self.data[self.itemFeature] = self.data[self.itemFeature].str.strip() self.data.dropna(axis = 0, subset =[self.invoiceNoFeature], inplace = True) self.data[self.invoiceNoFeature] = self.data[self.invoiceNoFeature].astype('str') self.data = self.data.groupby([self.invoiceNoFeature,self.itemFeature]).size() self.data=self.data.unstack().reset_index().fillna('0').set_index(self.invoiceNoFeature) self.data = self.data.applymap(hot_encode) ohe_df = self.data ''' print(self.data) sys.exit() items = [] for col in list(self.data): ucols = self.data[col].dropna().unique() #print('ucols :',ucols) if len(ucols) > 0: items = items + list(set(ucols) - set(items)) #items = self.data.apply(lambda col: col.unique()) #print(items) #items = (self.data[self.masterColumn].unique()) #print(items) self.log.info("-------> Total Unique Items: "+str(len(items))) encoded_vals = [] for index, row in self.data.iterrows(): labels = {} uncommons = list(set(items) - set(row)) commons = list(set(items).intersection(row)) for uc in uncommons: labels[uc] = 0 for com in commons: labels[com] = 1 encoded_vals.append(labels) ohe_df = pd.DataFrame(encoded_vals) #print(ohe_df) ''' freq_items = apriori(ohe_df, min_support=self.minSupport, use_colnames=True) self.log.info('Status:- |... 
AssociationRule Algorithm applied: Apriori') if not freq_items.empty: self.log.info("\n------------ Frequent Item Set --------------- ") self.log.info(freq_items) save_freq_items = pd.DataFrame() save_freq_items["itemsets"] = freq_items["itemsets"].apply(lambda x: ', '.join(list(x))).astype("unicode") outputfile = os.path.join(outputLocation,'frequentItems.csv') save_freq_items.to_csv(outputfile) self.log.info('-------> FreqentItems File Name:'+outputfile) rules = association_rules(freq_items, metric=self.metric, min_threshold=self.minThreshold) if not rules.empty: #rules = rules.sort_values(['confidence', 'lift'], ascending =[False, False]) self.log.info("\n------------ Rules --------------- ") for index, row in rules.iterrows(): self.log.info("------->Rule: "+ str(row['antecedents']) + " -> " + str(row['consequents'])) self.log.info("---------->Support: "+ str(row['support'])) self.log.info("---------->Confidence: "+ str(row['confidence'])) self.log.info("---------->Lift: "+ str(row['lift'])) #rules['antecedents'] = list(rules['antecedents']) #rules['consequents'] = list(rules['consequents']) rules["antecedents"] = rules["antecedents"].apply(lambda x: ', '.join(list(x))).astype("unicode") rules["consequents"] = rules["consequents"].apply(lambda x: ', '.join(list(x))).astype("unicode") self.log.info("\n------------ Rules End --------------- ") outputfile = os.path.join(outputLocation,'associationRules.csv') self.log.info('-------> AssciationRule File Name:'+outputfile) rules.to_csv(outputfile) else: self.log.info("\n------------ Frequent Item Set --------------- ") self.log.info("Status:- |... There are no association found in frequent items above that threshold (minThreshold)") else: self.log.info("\n------------ Frequent Item Set --------------- ") self.log.info("Status:- |... There are no frequent items above that threshold (minSupport)") evaulatemodel = '{"Model":"Apriori","Score":"NA"}' return(evaulatemodel)
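A compact sketch of the apriori to association_rules pipeline that apply_associationRules() runs above, using a hand-written one-hot basket matrix in place of the invoice/item grouping. The thresholds shown (min_support=0.5, metric='lift', min_threshold=1.0) are illustrative stand-ins for the configured minSupport/metric/minThreshold values.

# Illustrative sketch of the mlxtend apriori -> association_rules flow used above.
import pandas as pd
from mlxtend.frequent_patterns import apriori, association_rules

# Tiny one-hot basket matrix: rows are transactions, columns are items,
# equivalent to the groupby/unstack/hot_encode output in apply_associationRules().
ohe_df = pd.DataFrame(
    [[1, 1, 0, 1],
     [1, 0, 1, 1],
     [0, 1, 1, 0],
     [1, 1, 1, 1],
     [1, 1, 0, 0]],
    columns=['bread', 'milk', 'eggs', 'butter']
).astype(bool)

# Frequent itemsets above the support threshold, keeping readable column names.
freq_items = apriori(ohe_df, min_support=0.5, use_colnames=True)

# Rules filtered by the chosen metric/threshold (here: lift >= 1.0).
rules = association_rules(freq_items, metric='lift', min_threshold=1.0)
print(rules[['antecedents', 'consequents', 'support', 'confidence', 'lift']])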
featureReducer.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import sys import json import datetime,time,timeit import itertools #Sci-Tools imports import numpy as np import pandas as pd import math from statsmodels.tsa.stattools import adfuller from scipy.stats.stats import pearsonr from numpy import cumsum, log, polyfit, sqrt, std, subtract from numpy.random import randn #SDP1 class import from feature_engineering.featureImportance import featureImp from sklearn.feature_selection import VarianceThreshold import logging class featureReducer(): def __init__(self): self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.log = logging.getLogger('eion') def startReducer(self,df,data_columns,target,var_threshold): self.log.info('\n---------- Feature Reducer Start ----------') dataframe = df columns=data_columns target = target corrThreshold=1.0 categoricalFeatures=[] nonNumericFeatures=[] constFeatures=[] qconstantColumns=[] DtypesDic={} numericFeatures=[] nonNumericalFeatures=[] similarFeatureGroups=[] try: dataFDtypes=self.dataFramecolType(dataframe) for item in dataFDtypes: DtypesDic[item[0]] = item[1] if item[1] in self.pandasNumericDtypes: numericFeatures.append(item[0]) else: nonNumericFeatures.append(item[0]) #Checking for constant data features for col in columns: try: distCount = len(dataframe[col].unique()) if(distCount == 1): constFeatures.append(col) except Exception as inst: self.log.info('Unique Testing Fail for Col '+str(col)) numericalDataCols,nonNumericalDataCols = [],[] #Removing constant data features if(len(constFeatures) != 0): self.log.info( '-------> Constant Features: '+str(constFeatures)) numericalDataCols = list(set(numericFeatures) - set(constFeatures)) nonNumericalDataCols = list(set(nonNumericFeatures) - set(constFeatures)) else: numericalDataCols = list(set(numericFeatures)) nonNumericalDataCols = list(set(nonNumericFeatures)) if(len(numericalDataCols) > 1): if var_threshold !=0: qconstantFilter = VarianceThreshold(threshold=var_threshold) tempDf=df[numericalDataCols] qconstantFilter.fit(tempDf) qconstantColumns = [column for column in numericalDataCols if column not in tempDf.columns[qconstantFilter.get_support()]] if(len(qconstantColumns) != 0): if target != '' and target in qconstantColumns: qconstantColumns.remove(target) self.log.info( '-------> Low Variant Features: '+str(qconstantColumns)) self.log.info('Status:- |... Low variance feature treatment done: '+str(len(qconstantColumns))+' low variance features found') numericalDataCols = list(set(numericalDataCols) - set(qconstantColumns)) else: self.log.info('Status:- |... 
Low variance feature treatment done: Found zero or 1 numeric feature') #Minimum of two columns required for data integration if(len(numericalDataCols) > 1): numColPairs = list(itertools.product(numericalDataCols, numericalDataCols)) noDupList = [] for item in numColPairs: if(item[0] != item[1]): noDupList.append(item) numColPairs = noDupList tempArray = [] for item in numColPairs: tempCorr = np.abs(dataframe[item[0]].corr(dataframe[item[1]])) if(tempCorr > corrThreshold): tempArray.append(item[0]) tempArray = np.unique(tempArray) nonsimilarNumericalCols = list(set(numericalDataCols) - set(tempArray)) ''' Notes: tempArray: List of all similar/equal data features nonsimilarNumericalCols: List of all non-correlatable data features ''' #Grouping similar/equal features groupedFeatures = [] if(len(numericalDataCols) != len(nonsimilarNumericalCols)): #self.log.info( '-------> Similar/Equal Features: Not Any') #Correlation dictionary corrDic = {} for feature in tempArray: temp = [] for col in tempArray: tempCorr = np.abs(dataframe[feature].corr(dataframe[col])) temp.append(tempCorr) corrDic[feature] = temp #Similar correlation dataframe corrDF = pd.DataFrame(corrDic,index = tempArray) corrDF.loc[:,:] = np.tril(corrDF, k=-1) alreadyIn = set() similarFeatures = [] for col in corrDF: perfectCorr = corrDF[col][corrDF[col] > corrThreshold].index.tolist() if perfectCorr and col not in alreadyIn: alreadyIn.update(set(perfectCorr)) perfectCorr.append(col) similarFeatures.append(perfectCorr) self.log.info( '-------> No Similar/Equal Features: '+str(len(similarFeatures))) for i in range(0,len(similarFeatures)): similarFeatureGroups.append(similarFeatures[i]) #self.log.info((str(i+1)+' '+str(similarFeatures[i]))) self.log.info('-------> Similar/Equal Features: '+str(similarFeatureGroups)) self.log.info('-------> Non Similar Features :'+str(nonsimilarNumericalCols)) updatedSimFeatures = [] for items in similarFeatures: if(target != '' and target in items): for p in items: updatedSimFeatures.append(p) else: updatedSimFeatures.append(items[0]) newTempFeatures = list(set(updatedSimFeatures + nonsimilarNumericalCols)) updatedNumFeatures = newTempFeatures #self.log.info( '\n <--- Merged similar/equal features into one ---> ') updatedFeatures = list(set(newTempFeatures + nonNumericalDataCols)) self.log.info('Status:- |... Similar feature treatment done: '+str(len(similarFeatures))+' similar features found') else: updatedNumFeatures = numericalDataCols updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns)) self.log.info( '-------> Similar/Equal Features: Not Any') self.log.info('Status:- |... Similar feature treatment done: No similar features found') else: updatedNumFeatures = numericalDataCols updatedFeatures = list(set(columns) - set(constFeatures)-set(qconstantColumns)) self.log.info( '\n-----> Need minimum of two numerical features for data integration.') self.log.info('Status:- |... 
Similar feature treatment done: Found zero or 1 numeric feature') self.log.info('---------- Feature Reducer End ----------\n') return updatedNumFeatures,updatedFeatures,similarFeatureGroups except Exception as inst: self.log.info("feature Reducer failed "+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return [],[],[] def dataFramecolType(self,dataFrame): dataFDtypes=[] try: dataColumns=list(dataFrame.columns) for i in dataColumns: dataType=dataFrame[i].dtypes dataFDtypes.append(tuple([i,str(dataType)])) return dataFDtypes except: self.log.info("error in dataFramecolType") return dataFDtypes
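A condensed sketch of the three reduction steps startReducer() performs above: dropping constant columns, dropping quasi-constant numeric columns with VarianceThreshold, and collapsing (near-)perfectly correlated columns to one representative. The toy frame and the 0.01 variance / 0.999 correlation thresholds are illustrative stand-ins for the configured var_threshold and corrThreshold.

# Illustrative sketch of the constant / low-variance / duplicate-feature
# reduction performed by startReducer() above, on a toy numeric frame.
import numpy as np
import pandas as pd
from sklearn.feature_selection import VarianceThreshold

df = pd.DataFrame({
    'a': np.arange(100, dtype=float),
    'b': np.arange(100, dtype=float) * 2.0,    # perfectly correlated with 'a'
    'c': np.r_[np.zeros(99), 1.0],             # quasi-constant (very low variance)
    'd': np.ones(100),                         # constant
    'e': np.random.RandomState(0).rand(100),
})

# 1) Constant features: a single unique value.
const_cols = [c for c in df.columns if df[c].nunique() == 1]
df = df.drop(columns=const_cols)

# 2) Quasi-constant features: variance below the threshold.
vt = VarianceThreshold(threshold=0.01)
vt.fit(df)
qconst_cols = [c for c in df.columns if c not in df.columns[vt.get_support()]]
df = df.drop(columns=qconst_cols)

# 3) Near-perfectly correlated pairs: keep one representative per group.
corr = df.corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
dup_cols = [c for c in upper.columns if (upper[c] >= 0.999).any()]
df = df.drop(columns=dup_cols)

print('constant:', const_cols, 'quasi-constant:', qconst_cols, 'duplicates:', dup_cols)
print('kept features:', list(df.columns))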
featureSelector.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import os import sys import json import datetime,time,timeit import itertools #Sci-Tools imports import numpy as np import pandas as pd import math from statsmodels.tsa.stattools import adfuller from scipy.stats.stats import pearsonr from numpy import cumsum, log, polyfit, sqrt, std, subtract from numpy.random import randn from sklearn.metrics import normalized_mutual_info_score from sklearn.feature_selection import mutual_info_regression import logging #SDP1 class import from feature_engineering.featureImportance import featureImp from feature_engineering.featureReducer import featureReducer from sklearn.linear_model import Lasso, LogisticRegression from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import ExtraTreesClassifier from sklearn.decomposition import PCA from sklearn.decomposition import TruncatedSVD from sklearn.decomposition import FactorAnalysis from sklearn.decomposition import FastICA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.preprocessing import MinMaxScaler from sklearn.feature_selection import RFE def ranking(ranks, names, order=1): minmax = MinMaxScaler() ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0] ranks = map(lambda x: round(x,2), ranks) return dict(zip(names, ranks)) # noinspection PyPep8Naming class featureSelector(): def __init__(self): self.pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] self.log = logging.getLogger('eion') def startSelector(self,df,conf_json,textFeatures,targetFeature,problem_type): try: categoricalMaxLabel = int(conf_json['categoryMaxLabel']) pca='None' pcaReducerStatus = conf_json['featureEngineering']['PCA'] svdReducerStatus = conf_json['featureEngineering']['SVD'] factorReducerStatus = conf_json['featureEngineering']['FactorAnalysis'] icaReducerStatus = conf_json['featureEngineering']['ICA'] nfeatures=float(conf_json['featureEngineering']['numberofComponents']) statisticalConfig = conf_json['statisticalConfig'] corrThresholdInput = float(statisticalConfig.get('correlationThresholdFeatures',0.50)) corrThresholdTarget = float(statisticalConfig.get('correlationThresholdTarget',0.85)) pValThresholdInput = float(statisticalConfig.get('pValueThresholdFeatures',0.05)) pValThresholdTarget = float(statisticalConfig.get('pValueThresholdTarget',0.04)) varThreshold = float(statisticalConfig.get('varianceThreshold',0.01)) allFeaturesSelector = conf_json['featureSelection']['allFeatures'] correlationSelector = conf_json['featureSelection']['statisticalBased'] modelSelector = conf_json['featureSelection']['modelBased'] featureSelectionMethod = conf_json['selectionMethod']['featureSelection'] featureEngineeringSelector = conf_json['selectionMethod']['featureEngineering'] if featureSelectionMethod == 'True': featureEngineeringSelector = 'False' # if feature engineering is true then we check weather PCA is true or svd is true. 
By default we will run PCA if featureEngineeringSelector == 'True': if pcaReducerStatus == 'True': svdReducerStatus = 'False' factorReducerStatus=='False' icaReducerStatus == 'False' elif svdReducerStatus == 'True': pcaReducerStatus = 'False' factorReducerStatus=='False' icaReducerStatus == 'False' elif factorReducerStatus=='True': pcaReducerStatus=='False' svdReducerStatus=='False' icaReducerStatus=='False' elif icaReducerStatus=='True': pcaReducerStatus=="False" svdReducerStatus=="False" factorReducerStatus=="False" else: pcaReducerStatus = 'True' if featureSelectionMethod == 'False' and featureEngineeringSelector == 'False': featureSelectionMethod = 'True' if featureSelectionMethod == 'True': if modelSelector == 'False' and correlationSelector == 'False' and allFeaturesSelector == 'False': modelSelector = 'True' reductionMethod = 'na' bpca_features = [] #nfeatures = 0 if 'maxClasses' in conf_json: maxclasses = int(conf_json['maxClasses']) else: maxClasses = 20 target = targetFeature self.log.info('-------> Feature: '+str(target)) dataFrame = df pThresholdInput=pValThresholdInput pThresholdTarget=pValThresholdTarget cThresholdInput=corrThresholdInput cThresholdTarget=corrThresholdTarget numericDiscreteFeatures=[] similarGruops=[] numericContinuousFeatures=[] categoricalFeatures=[] nonNumericFeatures=[] apca_features = [] dTypesDic={} dataColumns = list(dataFrame.columns) features_list = list(dataFrame.columns) modelselectedFeatures=[] topFeatures=[] allFeatures=[] targetType="" # just to make sure feature engineering is false #print(svdReducerStatus) if featureEngineeringSelector.lower() == 'false' and correlationSelector.lower() == "true" and len(textFeatures) <= 0: reducerObj=featureReducer() self.log.info(featureReducer.__doc__) self.log.info('Status:- |... Feature reduction started') updatedNumericFeatures,updatedFeatures,similarGruops=reducerObj.startReducer(dataFrame,dataColumns,target,varThreshold) if len(updatedFeatures) <= 1: self.log.info('=======================================================') self.log.info('Most of the features are of low variance. Use Model based feature engineering for better result') self.log.info('=======================================================') raise Exception('Most of the features are of low variance. Use Model based feature engineering for better result') dataFrame=dataFrame[updatedFeatures] dataColumns=list(dataFrame.columns) self.log.info('Status:- |... 
Feature reduction completed') elif (pcaReducerStatus.lower() == "true" or svdReducerStatus.lower() == 'true' or factorReducerStatus.lower() == 'true' or icaReducerStatus.lower()=='true') and featureEngineeringSelector.lower() == 'true': # check is PCA or SVD is true pcaColumns=[] #print(svdReducerStatus.lower()) if target != "": dataColumns.remove(target) targetArray=df[target].values targetArray.shape = (len(targetArray), 1) if pcaReducerStatus.lower() == "true": if nfeatures == 0: pca = PCA(n_components='mle',svd_solver = 'full') elif nfeatures < 1: pca = PCA(n_components=nfeatures,svd_solver = 'full') else: pca = PCA(n_components=int(nfeatures)) pca.fit(df[dataColumns]) bpca_features = dataColumns.copy() pcaArray=pca.transform(df[dataColumns]) method = 'PCA' elif svdReducerStatus.lower() == 'true': if nfeatures < 2: nfeatures = 2 pca = TruncatedSVD(n_components=int(nfeatures), n_iter=7, random_state=42) pca.fit(df[dataColumns]) bpca_features = dataColumns.copy() pcaArray=pca.transform(df[dataColumns]) method = 'SVD' elif factorReducerStatus.lower()=='true': if int(nfeatures) == 0: pca=FactorAnalysis() else: pca=FactorAnalysis(n_components=int(nfeatures)) pca.fit(df[dataColumns]) bpca_features = dataColumns.copy() pcaArray=pca.transform(df[dataColumns]) method = 'FactorAnalysis' elif icaReducerStatus.lower()=='true': if int(nfeatures) == 0: pca=FastICA() else: pca=FastICA(n_components=int(nfeatures)) pca.fit(df[dataColumns]) bpca_features = dataColumns.copy() pcaArray=pca.transform(df[dataColumns]) method = 'IndependentComponentAnalysis' pcaDF=pd.DataFrame(pcaArray) #print(pcaDF) for i in range(len(pcaDF.columns)): pcaColumns.append(method+str(i)) topFeatures=pcaColumns apca_features= pcaColumns.copy() if target != '': pcaColumns.append(target) scaledDf = pd.DataFrame(np.hstack((pcaArray, targetArray)),columns=pcaColumns) else: scaledDf = pd.DataFrame(pcaArray,columns=pcaColumns) self.log.info("<--- dataframe after dimensionality reduction using "+method) self.log.info(scaledDf.head()) dataFrame=scaledDf dataColumns=list(dataFrame.columns) self.log.info('Status:- |... Feature reduction started') self.log.info('Status:- |... '+method+' done') self.log.info('Status:- |... 
Feature reduction completed') self.numofCols = dataFrame.shape[1] self.numOfRows = dataFrame.shape[0] dataFDtypes=[] for i in dataColumns: dataType=dataFrame[i].dtypes dataFDtypes.append(tuple([i,str(dataType)])) #Categoring datatypes for item in dataFDtypes: dTypesDic[item[0]] = item[1] if item[0] != target: if item[1] in ['int16', 'int32', 'int64'] : numericDiscreteFeatures.append(item[0]) elif item[1] in ['float16', 'float32', 'float64']: numericContinuousFeatures.append(item[0]) else: nonNumericFeatures.append(item[0]) self.numOfRows = dataFrame.shape[0] ''' cFRatio = 0.01 if(self.numOfRows < 1000): cFRatio = 0.2 elif(self.numOfRows < 10000): cFRatio = 0.1 elif(self.numOfRows < 100000): cFRatio = 0.01 ''' for i in numericDiscreteFeatures: nUnique=len(dataFrame[i].unique().tolist()) nRows=self.numOfRows if nUnique <= categoricalMaxLabel: categoricalFeatures.append(i) for i in numericContinuousFeatures: nUnique=len(dataFrame[i].unique().tolist()) nRows=self.numOfRows if nUnique <= categoricalMaxLabel: categoricalFeatures.append(i) discreteFeatures=list(set(numericDiscreteFeatures)-set(categoricalFeatures)) numericContinuousFeatures=list(set(numericContinuousFeatures)-set(categoricalFeatures)) self.log.info('-------> Numerical continuous features :'+(str(numericContinuousFeatures))[:500]) self.log.info('-------> Numerical discrete features :'+(str(discreteFeatures))[:500]) self.log.info('-------> Non numerical features :'+(str(nonNumericFeatures))[:500]) self.log.info('-------> Categorical Features :'+(str(categoricalFeatures))[:500]) if target !="" and featureEngineeringSelector.lower() == "false" and correlationSelector.lower() == "true": self.log.info('\n------- Feature Based Correlation Analysis Start ------') start = time.time() featureImpObj = featureImp() topFeatures,targetType= featureImpObj.FFImpNew(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThresholdInput,pThresholdTarget,cThresholdInput,cThresholdTarget,categoricalMaxLabel,problem_type,maxClasses) #topFeatures,targetType= featureImpObj.FFImp(dataFrame,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pThreshold,cThreshold,categoricalMaxLabel,problem_type,maxClasses) self.log.info('-------> Highly Correlated Features Using Correlation Techniques'+(str(topFeatures))[:500]) executionTime=time.time() - start self.log.info('-------> Time Taken: '+str(executionTime)) self.log.info('Status:- |... 
Correlation based feature selection done: '+str(len(topFeatures))+' out of '+str(len(dataColumns))+' selected') self.log.info('------- Feature Based Correlation Analysis End ------>\n') if targetType == '': if problem_type.lower() == 'classification': targetType = 'categorical' if problem_type.lower() == 'regression': targetType = 'continuous' if target !="" and featureEngineeringSelector.lower() == "false" and modelSelector.lower() == "true": self.log.info('\n------- Model Based Correlation Analysis Start -------') start = time.time() updatedFeatures = dataColumns updatedFeatures.remove(target) #targetType = problem_type.lower() modelselectedFeatures=[] if targetType == 'categorical': try: xtrain=dataFrame[updatedFeatures] ytrain=dataFrame[target] etc = ExtraTreesClassifier(n_estimators=100) etc.fit(xtrain, ytrain) rfe = RFE(etc, n_features_to_select=1, verbose =0 ) rfe.fit(xtrain, ytrain) # total list of features ranks = {} ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1) for item in ranks["RFE_LR"]: if ranks["RFE_LR"][item]>0.30: #threshold as 30% modelselectedFeatures.append(item) modelselectedFeatures = list(modelselectedFeatures) self.log.info('-------> Highly Correlated Features Using Treeclassifier + RFE: '+(str(modelselectedFeatures))[:500]) except Exception as e: self.log.info('---------------->'+str(e)) selector = SelectFromModel(ExtraTreesClassifier()) xtrain=dataFrame[updatedFeatures] ytrain=dataFrame[target] selector.fit(xtrain,ytrain) modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist() self.log.info('-------> Highly Correlated Features Using Treeclassifier: '+(str(modelselectedFeatures))[:500]) else: try: xtrain=dataFrame[updatedFeatures] ytrain=dataFrame[target] ls = Lasso() ls.fit(xtrain, ytrain) rfe = RFE(ls, n_features_to_select=1, verbose = 0 ) rfe.fit(xtrain, ytrain) # total list of features ranks = {} ranks["RFE_LR"] = ranking(list(map(float, rfe.ranking_)), dataColumns, order=-1) for item in ranks["RFE_LR"]: if ranks["RFE_LR"][item]>0.30: #threshold as 30% modelselectedFeatures.append(item) modelselectedFeatures = list(modelselectedFeatures) self.log.info('-------> Highly Correlated Features Using LASSO + RFE: '+(str(modelselectedFeatures))[:500]) except Exception as e: self.log.info('---------------->'+str(e)) selector = SelectFromModel(Lasso()) xtrain=dataFrame[updatedFeatures] ytrain=dataFrame[target] selector.fit(xtrain,ytrain) modelselectedFeatures = xtrain.columns[(selector.get_support())].tolist() self.log.info('-------> Highly Correlated Features Using LASSO: '+(str(modelselectedFeatures))[:500]) executionTime=time.time() - start self.log.info('-------> Time Taken: '+str(executionTime)) self.log.info('Status:- |... 
Model based feature selection done: '+str(len(modelselectedFeatures))+' out of '+str(len(dataColumns))+' selected') self.log.info('--------- Model Based Correlation Analysis End -----\n') if target !="" and featureEngineeringSelector.lower() == "false" and allFeaturesSelector.lower() == "true": allFeatures = features_list if target != '': allFeatures.remove(target) #print(allFeatures) if len(topFeatures) == 0 and len(modelselectedFeatures) == 0 and len(allFeatures) == 0: allFeatures = features_list return dataFrame,target,topFeatures,modelselectedFeatures,allFeatures,targetType,similarGruops,numericContinuousFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,pca,bpca_features,apca_features,featureEngineeringSelector except Exception as inst: self.log.info('Feature selector failed: '+str(inst)) exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
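The reducer branch above maps nfeatures onto each sklearn estimator differently: for PCA, 0 means n_components='mle', a value below 1 is treated as the fraction of variance to keep, and anything else is an integer component count; TruncatedSVD is forced to at least 2 components; FactorAnalysis and FastICA fall back to their library defaults when nfeatures is 0. A minimal standalone sketch of that dispatch, using a hypothetical helper name build_reducer that is not part of AION:

# Minimal sketch of the reducer dispatch described above (illustrative only).
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA, TruncatedSVD, FactorAnalysis, FastICA

def build_reducer(method, nfeatures):
    """Mirror the n_components handling used in the feature-engineering branch."""
    if method == 'PCA':
        if nfeatures == 0:
            return PCA(n_components='mle', svd_solver='full')        # let MLE pick the rank
        if nfeatures < 1:
            return PCA(n_components=nfeatures, svd_solver='full')    # keep this fraction of variance
        return PCA(n_components=int(nfeatures))
    if method == 'SVD':
        return TruncatedSVD(n_components=max(int(nfeatures), 2), n_iter=7, random_state=42)
    if method == 'FactorAnalysis':
        return FactorAnalysis() if int(nfeatures) == 0 else FactorAnalysis(n_components=int(nfeatures))
    if method == 'ICA':
        return FastICA() if int(nfeatures) == 0 else FastICA(n_components=int(nfeatures))
    raise ValueError(f'Unknown reducer: {method}')

if __name__ == '__main__':
    df = pd.DataFrame(np.random.rand(100, 6), columns=[f'f{i}' for i in range(6)])
    reducer = build_reducer('PCA', 3)
    reduced = pd.DataFrame(reducer.fit_transform(df), columns=[f'PCA{i}' for i in range(3)])
    print(reduced.head())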
featureImportance.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' #System imports import os import sys import json import datetime,time,timeit import itertools #Sci-Tools imports import numpy as np import pandas as pd import math from sklearn.metrics import normalized_mutual_info_score from sklearn.feature_selection import f_regression,mutual_info_regression from sklearn.feature_selection import chi2,f_classif,mutual_info_classif import scipy.stats from scipy.stats import pearsonr, spearmanr, pointbiserialr, f_oneway, kendalltau, chi2_contingency import statsmodels.api as sm import statsmodels.formula.api as smf import logging def getHigherSignificanceColName(featureDict, colname1, colname2): if featureDict[colname1]<featureDict[colname2]: return colname2 else: return colname1 class featureImp(): def __init__(self): self.dTypesDic = {} self.featureImpDic={} self.indexedDic = {} self.log = logging.getLogger('eion') def FFImpNew(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValThInput,pValThTarget,corrThInput,corrThTarget,categoricalMaxLabel,problem_type,maxClasses): try: dataframe = df contiFeatures= contFeatures quantFeatures=discreteFeatures+contiFeatures categoricalFeatures=categoricalFeatures targetData=dataframe[target] nUnique=len(targetData.unique().tolist()) if nUnique <= categoricalMaxLabel: targetType="categorical" else: targetType="continuous" if problem_type.lower() == 'classification' and targetType == 'continuous': targetType = 'categorical' self.log.info( '-------> Change Target Type to Categorial as user defined') if problem_type.lower() == 'regression' and targetType == 'categorical': targetType = 'continuous' self.log.info( '-------> Change Target Type to Continuous as user defined') self.log.info( '-------> Target Type: '+str(targetType)) impFeatures=[] catFeature = [] numFeature = [] catFeatureXYcat = [] numFeatureXYcat = [] catFeatureXYnum= [] numFeatureXYnum = [] dropFeatureCat= [] dropFeatureNum = [] featureDict = {} if targetType =="categorical": if len(categoricalFeatures) !=0: # input vs target # chi-square for col in categoricalFeatures: contingency = pd.crosstab(dataframe[col], targetData) stat, p, dof, expected = chi2_contingency(contingency) if p <= pValThTarget: catFeatureXYcat.append(col) # categorical feature xy when target is cat featureDict[col] = p #input vs input # chi_square if len(catFeatureXYcat) != 0: length = len(catFeatureXYcat) for i in range(length): for j in range(i+1, length): contingency = pd.crosstab(dataframe[catFeatureXYcat[i]], dataframe[catFeatureXYcat[j]]) stat, p, dof, expected = chi2_contingency(contingency) if p > pValThInput: highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYcat[i], catFeatureXYcat[j]) dropFeatureCat.append(highSignificanceColName) break catFeature = list(set(catFeatureXYcat) - set(dropFeatureCat)) featureDict.clear() dropFeatureCat.clear() if len(quantFeatures) !=0: # input vs target # one way anova for col in quantFeatures: CategoryGroupLists = 
dataframe.groupby(target)[col].apply(list) AnovaResults = f_oneway(*CategoryGroupLists) if AnovaResults[1] <= pValThTarget: numFeatureXYcat.append(col) #numeric feature xy when target is cat featureDict[col] = AnovaResults[1] #input vs input # preason/spearman/ols # numeric feature xx when target is cat if len(numFeatureXYcat) != 0: df_xx = dataframe[numFeatureXYcat] rows, cols = df_xx.shape flds = list(df_xx.columns) corr_pearson = df_xx.corr(method='pearson').values corr_spearman = df_xx.corr(method='spearman').values for i in range(cols): for j in range(i+1, cols): if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput: if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput: #f = "'"+flds[i]+"'"+' ~ '+"'"+flds[j]+"'" #reg = smf.ols(formula=f, data=dataframe).fit() tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]}) reg = smf.ols('y~x', data=tmpdf).fit() if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput: highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) dropFeatureNum.append(highSignificanceColName) break else: highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) dropFeatureNum.append(highSignificanceColName) break else: highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) dropFeatureNum.append(highSignificanceColName) break numFeature = list(set(numFeatureXYcat) - set(dropFeatureNum)) dropFeatureNum.clear() featureDict.clear() impFeatures = numFeature+catFeature hCorrFeatures=list(set((impFeatures))) else: # targetType =="continuous": if len(categoricalFeatures) !=0: # input vs target # Anova for col in categoricalFeatures: #f = target+' ~ C('+col+')' #model = smf.ols(f, data=dataframe).fit() #table = sm.stats.anova_lm(model, typ=2) tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]}) model = smf.ols('y~x', data=tmpdf).fit() table = sm.stats.anova_lm(model, typ=2) if table['PR(>F)'][0] <= pValThTarget: catFeatureXYnum.append(col) #categorical feature xy when target is numeric featureDict[col]=table['PR(>F)'][0] #input vs input # chi_square if len(catFeatureXYnum) != 0: length = len(catFeatureXYnum) for i in range(length): for j in range(i+1, length): contingency = pd.crosstab(dataframe[catFeatureXYnum[i]], dataframe[catFeatureXYnum[j]]) stat, p, dof, expected = chi2_contingency(contingency) if p > pValThInput: highSignificanceColName = getHigherSignificanceColName(featureDict, catFeatureXYnum[i], catFeatureXYnum[j]) dropFeatureCat.append(highSignificanceColName) break catFeature = list(set(catFeatureXYnum) - set(dropFeatureCat)) dropFeatureCat.clear() featureDict.clear() if len(quantFeatures) !=0: # input vs target # preason/spearman/ols for col in quantFeatures: pearson_corr = pearsonr(dataframe[col], targetData) coef = round(pearson_corr[0],5) p_value = round(pearson_corr[1],5) if coef > -corrThTarget and coef < corrThTarget: spearman_corr = spearmanr(dataframe[col], targetData) coef = round(spearman_corr[0],5) p_value = round(spearman_corr[1],5) if coef > -corrThTarget and coef < corrThTarget: #f = target+' ~ '+col #reg = smf.ols(formula=f, data=dataframe).fit() tmpdf = pd.DataFrame({'x':dataframe[col], 'y':dataframe[target]}) reg = smf.ols('y~x', data=tmpdf).fit() if len(reg.pvalues) > 1 and reg.pvalues[1] <= pValThTarget: numFeatureXYnum.append(col) # numeric feature xx when target is numeric featureDict[col]=reg.pvalues[1] else: numFeatureXYnum.append(col) featureDict[col]=p_value else: 
numFeatureXYnum.append(col) featureDict[col]=p_value #input vs input # preason/spearman/ols if len(numFeatureXYnum) != 0: df_xx = dataframe[numFeatureXYnum] rows, cols = df_xx.shape flds = list(df_xx.columns) corr_pearson = df_xx.corr(method='pearson').values corr_spearman = df_xx.corr(method='spearman').values for i in range(cols): for j in range(i+1, cols): if corr_pearson[i,j] > -corrThInput and corr_pearson[i,j] < corrThInput: if corr_spearman[i,j] > -corrThInput and corr_spearman[i,j] < corrThInput: #f = flds[i]+' ~ '+flds[j] #reg = smf.ols(formula=f, data=dataframe).fit() tmpdf = pd.DataFrame({'x':dataframe[flds[j]], 'y':dataframe[flds[i]]}) reg = smf.ols('y~x', data=tmpdf).fit() if len(reg.pvalues) > 1 and reg.pvalues[1] > pValThInput: highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) dropFeatureNum.append(highSignificanceColName) break else: highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) dropFeatureNum.append(highSignificanceColName) break else: highSignificanceColName = getHigherSignificanceColName(featureDict, flds[i], flds[j]) dropFeatureNum.append(highSignificanceColName) break numFeature = list(set(numFeatureXYnum) - set(dropFeatureNum)) featureDict.clear() dropFeatureNum.clear() impFeatures = numFeature+catFeature hCorrFeatures=list(set(impFeatures)) return hCorrFeatures,targetType except Exception as inst: self.log.info( '\n--> Failed calculating feature importance '+str(inst)) hCorrFeatures=[] targetType='' exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) self.log.info('\n--> Taking all the features as highest correlation features') hCorrFeatures = list(dataframe.columns) return hCorrFeatures,targetType def FFImp(self,df,contFeatures,discreteFeatures,nonNumericFeatures,categoricalFeatures,dTypesDic,target,pValTh,corrTh,categoricalMaxLabel,problem_type,maxClasses): ''' Input: dataframe, numeric continuous features, numeric discrete features Output: feature importance dictionary ''' try: dataframe =df contiFeatures= contFeatures discreteFeatures = discreteFeatures nonNumeric = nonNumericFeatures categoricalFeatures=categoricalFeatures self.dTypesDic = dTypesDic numericFeatures = contiFeatures + discreteFeatures+categoricalFeatures quantFeatures=discreteFeatures+contiFeatures scorrDict={} fScoreDict={} pcorrDict={} miDict={} targetData=dataframe[target] data=dataframe[numericFeatures] nUnique=len(targetData.unique().tolist()) nRows=targetData.shape[0] ''' print("\n ===> nUnique :") print(nUnique) print("\n ===> nRows :") print(nRows) print("\n ===> cFRatio :") print(cFRatio) print("\n ===> nUnique/nRows :") ''' #calratio = nUnique self.log.info( '-------> Target Column Unique Stats: '+str(nUnique)+' nRows: '+str(nRows)+' Unique:'+str(nUnique)) #sys.exit() if nUnique <= categoricalMaxLabel: targetType="categorical" else: targetType="continuous" if problem_type.lower() == 'classification' and targetType == 'continuous': targetType = 'categorical' self.log.info( '-------> Change Target Type to Categorial as user defined') if problem_type.lower() == 'regression' and targetType == 'categorical': targetType = 'continuous' self.log.info( '-------> Change Target Type to Continuous as user defined') self.log.info( '-------> Target Type: '+str(targetType)) impFeatures=[] featureImpDict={} if targetType =="categorical": try: if len(categoricalFeatures) !=0: 
categoricalData=dataframe[categoricalFeatures] chiSqCategorical=chi2(categoricalData,targetData)[1] corrSeries=pd.Series(chiSqCategorical, index=categoricalFeatures) impFeatures.append(corrSeries[corrSeries<pValTh].index.tolist()) corrDict=corrSeries.to_dict() featureImpDict['chiSquaretestPValue']=corrDict except Exception as inst: self.log.info("Found negative values in categorical variables "+str(inst)) if len(quantFeatures) !=0: try: quantData=dataframe[quantFeatures] fclassScore=f_classif(quantData,targetData)[1] miClassScore=mutual_info_classif(quantData,targetData) fClassSeries=pd.Series(fclassScore,index=quantFeatures) miClassSeries=pd.Series(miClassScore,index=quantFeatures) impFeatures.append(fClassSeries[fClassSeries<pValTh].index.tolist()) impFeatures.append(miClassSeries[miClassSeries>corrTh].index.tolist()) featureImpDict['anovaPValue']=fClassSeries.to_dict() featureImpDict['MIScore']=miClassSeries.to_dict() except MemoryError as inst: self.log.info( '-------> MemoryError in feature selection. '+str(inst)) pearsonScore=dataframe.corr() targetPScore=abs(pearsonScore[target]) impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist()) featureImpDict['pearsonCoff']=targetPScore.to_dict() hCorrFeatures=list(set(sum(impFeatures, []))) else: if len(quantFeatures) !=0: try: quantData =dataframe[quantFeatures] fregScore=f_regression(quantData,targetData)[1] miregScore=mutual_info_regression(quantData,targetData) fregSeries=pd.Series(fregScore,index=quantFeatures) miregSeries=pd.Series(miregScore,index=quantFeatures) impFeatures.append(fregSeries[fregSeries<pValTh].index.tolist()) impFeatures.append(miregSeries[miregSeries>corrTh].index.tolist()) featureImpDict['anovaPValue']=fregSeries.to_dict() featureImpDict['MIScore']=miregSeries.to_dict() except MemoryError as inst: self.log.info( '-------> MemoryError in feature selection. 
'+str(inst)) pearsonScore=dataframe.corr() targetPScore=abs(pearsonScore[target]) impFeatures.append(targetPScore[targetPScore<pValTh].index.tolist()) featureImpDict['pearsonCoff']=targetPScore.to_dict() hCorrFeatures=list(set(sum(impFeatures, []))) return hCorrFeatures,targetType except Exception as inst: self.log.info( '\n--> Failed calculating feature importance '+str(inst)) hCorrFeatures=[] targetType='' exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename) self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) return hCorrFeatures,targetType ''' Importance degree Computes set of relational parameters pearson correlation, mutual information ''' def importanceDegree(self,dataframe,feature1,feature2): try: tempList = [] #Parameter 1: pearson correlation pcorr = self.pearsonCoff(dataframe,feature1,feature2) tempList.append(pcorr) #Parameter 2: mutual information #Testing mi = self.mutualInfo(dataframe,feature1,feature2,self.dTypesDic) tempList.append(mi) #return the highest parameter return np.max(tempList) except: return 0.0 ''' Compute pearson correlation ''' def pearsonCoff(self,dataframe,feature1,feature2): try: value=dataframe[feature1].corr(dataframe[feature2]) return np.abs(value) except: return 0.0 ''' Compute mutual information ''' def mutualInfo(self,dataframe,feature1,feature2,typeDic): try: numType = {'int64': 'discrete','int32' : 'discrete','int16' : 'discrete','float16' : 'continuous','float32' : 'continuous','float64' : 'continuous'} featureType1 = numType[typeDic[feature1]] featureType2 = numType[typeDic[feature2]] bufferList1=dataframe[feature1].values.tolist() bufferList2=dataframe[feature2].values.tolist() #Case 1: Only if both are discrete if(featureType1 == 'discrete' and featureType2 == 'discrete'): tempResult = discreteMI(bufferList1,bufferList2) return np.mean(tempResult) #Case 2: If one of the features is continuous elif(featureType1 == 'continuous' and featureType2 == 'discrete'): tempResult = self.categoricalMI(bufferList1,bufferList2) return np.mean(tempResult) else: tempResult = self.continuousMI(bufferList1,bufferList2) return np.mean(tempResult) except: return 0.0 def continuousMI(self,bufferList1,bufferList2): mi = 0.0 #Using mutual info regression from feature selection mi = mutual_info_regression(self.vec(bufferList1),bufferList2) return mi def categoricalMI(self,bufferList1,bufferList2): mi = 0.0 #Using mutual info classification from feature selection mi = mutual_info_classif(self.vec(bufferList1),bufferList2) return mi def discreteMI(self,bufferList1,bufferList2): mi = 0.0 #Using scikit normalized mutual information function mi = normalized_mutual_info_score(bufferList1,bufferList2) return mi def vec(self,x): return [[i] for i in x]
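FFImpNew above keeps a categorical input when its chi-square p-value against a categorical target is at or below pValThTarget, and a numeric input when its one-way ANOVA p-value is; survivors are then pruned against each other with pairwise tests. A toy, self-contained sketch of only the input-vs-target step, where the function name select_against_categorical_target and the column names are made up for illustration:

# Toy sketch of the input-vs-target tests used by FFImpNew (names are illustrative).
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency, f_oneway

def select_against_categorical_target(df, target, cat_cols, num_cols, p_thresh=0.05):
    kept = []
    y = df[target]
    for col in cat_cols:
        # chi-square test of independence between a categorical input and the target
        _, p, _, _ = chi2_contingency(pd.crosstab(df[col], y))
        if p <= p_thresh:
            kept.append(col)
    for col in num_cols:
        # one-way ANOVA: does the numeric input differ across target classes?
        groups = df.groupby(target)[col].apply(list)
        if f_oneway(*groups).pvalue <= p_thresh:
            kept.append(col)
    return kept

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    df = pd.DataFrame({'y': rng.integers(0, 2, 200)})
    df['signal'] = df['y'] * 2.0 + rng.normal(size=200)   # related to the target
    df['noise'] = rng.normal(size=200)                     # unrelated
    df['cat'] = np.where(df['y'] == 1, 'a', 'b')           # related categorical
    print(select_against_categorical_target(df, 'y', ['cat'], ['signal', 'noise']))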
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
AION_Gluon_MultiModalPrediction.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings("ignore") import json import os import sys import pandas as pd import numpy as np from pandas import json_normalize from autogluon.text import TextPredictor import os.path def predict(data): try: if os.path.splitext(data)[1] == ".tsv": df=pd.read_csv(data,encoding='utf-8',sep='\t') elif os.path.splitext(data)[1] == ".csv": df=pd.read_csv(data,encoding='utf-8') else: if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) df = json_normalize(jsonData) model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'text_prediction') predictor = TextPredictor.load(model_path) predictions = predictor.predict(df) df['predict'] = predictions outputjson = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} output = json.dumps(outputjson) print("predictions:",output) return(output) except KeyError as e: output = {"status":"FAIL","message":str(e).strip('"')} print("predictions:",json.dumps(output)) return (json.dumps(output)) except Exception as e: output = {"status":"FAIL","message":str(e).strip('"')} print("predictions:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = predict(sys.argv[1])
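The predict() entry point above accepts a .tsv, .csv, or .json path (or an inline JSON string) and loads a TextPredictor from the text_prediction folder next to the script. A hedged usage example, assuming that trained folder already exists and that reviews.csv is a placeholder for the caller's own input file:

# Hypothetical invocation; 'text_prediction' must already contain a trained TextPredictor.
#   python AION_Gluon_MultiModalPrediction.py reviews.csv
# or, from another Python module:
import json
from AION_Gluon_MultiModalPrediction import predict

result = json.loads(predict('reviews.csv'))
print(result['status'])   # 'SUCCESS' with a 'data' payload, or 'FAIL' with a 'message'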
__init__.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * '''
AION_Gluon_MultiLabelPrediction.py
''' * * ============================================================================= * COPYRIGHT NOTICE * ============================================================================= * @ Copyright HCL Technologies Ltd. 2021, 2022,2023 * Proprietary and confidential. All information contained herein is, and * remains the property of HCL Technologies Limited. Copying or reproducing the * contents of this file, via any medium is strictly prohibited unless prior * written permission is obtained from HCL Technologies Limited. * ''' import warnings warnings.filterwarnings("ignore") import json import os import sys import pandas as pd from pandas import json_normalize #from selector import selector #from inputprofiler import inputprofiler #from trained_model import trained_model #from output_format import output_format from autogluon.tabular import TabularDataset, TabularPredictor from autogluon.core.utils.utils import setup_outputdir from autogluon.core.utils.loaders import load_pkl from autogluon.core.utils.savers import save_pkl import os.path class MultilabelPredictor(): """ Tabular Predictor for predicting multiple columns in table. Creates multiple TabularPredictor objects which you can also use individually. You can access the TabularPredictor for a particular label via: `multilabel_predictor.get_predictor(label_i)` Parameters ---------- labels : List[str] The ith element of this list is the column (i.e. `label`) predicted by the ith TabularPredictor stored in this object. path : str Path to directory where models and intermediate outputs should be saved. If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models. Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all. Otherwise files from first `fit()` will be overwritten by second `fit()`. Caution: when predicting many labels, this directory may grow large as it needs to store many TabularPredictors. problem_types : List[str] The ith element is the `problem_type` for the ith TabularPredictor stored in this object. eval_metrics : List[str] The ith element is the `eval_metric` for the ith TabularPredictor stored in this object. consider_labels_correlation : bool Whether the predictions of multiple labels should account for label correlations or predict each label independently of the others. If True, the ordering of `labels` may affect resulting accuracy as each label is predicted conditional on the previous labels appearing earlier in this list (i.e. in an auto-regressive fashion). Set to False if during inference you may want to individually use just the ith TabularPredictor without predicting all the other labels. kwargs : Arguments passed into the initialization of each TabularPredictor. 
""" multi_predictor_file = 'multilabel_predictor.pkl' def __init__(self, labels, path, problem_types=None, eval_metrics=None, consider_labels_correlation=True, **kwargs): if len(labels) < 2: raise ValueError("MultilabelPredictor is only intended for predicting MULTIPLE labels (columns), use TabularPredictor for predicting one label (column).") self.path = setup_outputdir(path, warn_if_exist=False) self.labels = labels self.consider_labels_correlation = consider_labels_correlation self.predictors = {} # key = label, value = TabularPredictor or str path to the TabularPredictor for this label if eval_metrics is None: self.eval_metrics = {} else: self.eval_metrics = {labels[i] : eval_metrics[i] for i in range(len(labels))} problem_type = None eval_metric = None for i in range(len(labels)): label = labels[i] path_i = self.path + "Predictor_" + label if problem_types is not None: problem_type = problem_types[i] if eval_metrics is not None: eval_metric = self.eval_metrics[i] self.predictors[label] = TabularPredictor(label=label, problem_type=problem_type, eval_metric=eval_metric, path=path_i, **kwargs) def fit(self, train_data, tuning_data=None, **kwargs): """ Fits a separate TabularPredictor to predict each of the labels. Parameters ---------- train_data, tuning_data : str or autogluon.tabular.TabularDataset or pd.DataFrame See documentation for `TabularPredictor.fit()`. kwargs : Arguments passed into the `fit()` call for each TabularPredictor. """ if isinstance(train_data, str): train_data = TabularDataset(train_data) if tuning_data is not None and isinstance(tuning_data, str): tuning_data = TabularDataset(tuning_data) train_data_og = train_data.copy() if tuning_data is not None: tuning_data_og = tuning_data.copy() save_metrics = len(self.eval_metrics) == 0 for i in range(len(self.labels)): label = self.labels[i] predictor = self.get_predictor(label) if not self.consider_labels_correlation: labels_to_drop = [l for l in self.labels if l!=label] else: labels_to_drop = [labels[j] for j in range(i+1,len(self.labels))] train_data = train_data_og.drop(labels_to_drop, axis=1) if tuning_data is not None: tuning_data = tuning_data_og.drop(labels_to_drop, axis=1) print(f"Fitting TabularPredictor for label: {label} ...") predictor.fit(train_data=train_data, tuning_data=tuning_data, **kwargs) self.predictors[label] = predictor.path if save_metrics: self.eval_metrics[label] = predictor.eval_metric self.save() def predict(self, data, **kwargs): """ Returns DataFrame with label columns containing predictions for each label. Parameters ---------- data : str or autogluon.tabular.TabularDataset or pd.DataFrame Data to make predictions for. If label columns are present in this data, they will be ignored. See documentation for `TabularPredictor.predict()`. kwargs : Arguments passed into the predict() call for each TabularPredictor. """ return self._predict(data, as_proba=False, **kwargs) def predict_proba(self, data, **kwargs): """ Returns dict where each key is a label and the corresponding value is the `predict_proba()` output for just that label. Parameters ---------- data : str or autogluon.tabular.TabularDataset or pd.DataFrame Data to make predictions for. See documentation for `TabularPredictor.predict()` and `TabularPredictor.predict_proba()`. kwargs : Arguments passed into the `predict_proba()` call for each TabularPredictor (also passed into a `predict()` call). 
""" return self._predict(data, as_proba=True, **kwargs) def evaluate(self, data, **kwargs): """ Returns dict where each key is a label and the corresponding value is the `evaluate()` output for just that label. Parameters ---------- data : str or autogluon.tabular.TabularDataset or pd.DataFrame Data to evalate predictions of all labels for, must contain all labels as columns. See documentation for `TabularPredictor.evaluate()`. kwargs : Arguments passed into the `evaluate()` call for each TabularPredictor (also passed into the `predict()` call). """ data = self._get_data(data) eval_dict = {} for label in self.labels: print(f"Evaluating TabularPredictor for label: {label} ...") predictor = self.get_predictor(label) eval_dict[label] = predictor.evaluate(data, **kwargs) if self.consider_labels_correlation: data[label] = predictor.predict(data, **kwargs) return eval_dict def save(self): """ Save MultilabelPredictor to disk. """ for label in self.labels: if not isinstance(self.predictors[label], str): self.predictors[label] = self.predictors[label].path save_pkl.save(path=self.path+self.multi_predictor_file, object=self) print(f"MultilabelPredictor saved to disk. Load with: MultilabelPredictor.load('{self.path}')") @classmethod def load(cls, path): """ Load MultilabelPredictor from disk `path` previously specified when creating this MultilabelPredictor. """ path = os.path.expanduser(path) if path[-1] != os.path.sep: path = path + os.path.sep return load_pkl.load(path=path+cls.multi_predictor_file) def get_predictor(self, label): """ Returns TabularPredictor which is used to predict this label. """ predictor = self.predictors[label] if isinstance(predictor, str): return TabularPredictor.load(path=predictor) return predictor def _get_data(self, data): if isinstance(data, str): return TabularDataset(data) return data.copy() def _predict(self, data, as_proba=False, **kwargs): data = self._get_data(data) if as_proba: predproba_dict = {} for label in self.labels: print(f"Predicting with TabularPredictor for label: {label} ...") predictor = self.get_predictor(label) if as_proba: predproba_dict[label] = predictor.predict_proba(data, as_multiclass=True, **kwargs) data[label] = predictor.predict(data, **kwargs) if not as_proba: return data[self.labels] else: return predproba_dict def predict(data): try: if os.path.splitext(data)[1] == ".tsv": df=pd.read_csv(data,encoding='utf-8',sep='\t') elif os.path.splitext(data)[1] == ".csv": df=pd.read_csv(data,encoding='utf-8') else: if os.path.splitext(data)[1] == ".json": with open(data,'r',encoding='utf-8') as f: jsonData = json.load(f) else: jsonData = json.loads(data) df = json_normalize(jsonData) #df0 = df.copy() #profilerobj = inputprofiler() #df = profilerobj.apply_profiler(df) #selectobj = selector() #df = selectobj.apply_selector(df) #modelobj = trained_model() #output = modelobj.predict(df,"") # Load the Test data for Prediction # ----------------------------------------------------------------------------# test_data = df#TabularDataset(data) #'testingDataset.csv' #subsample_size = 2 # ----------------------------------------------------------------------------# # Specify the corresponding target features to be used # ----------------------------------------------------------------------------# #labels = ['education-num','education','class'] configFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),'etc','predictionConfig.json') with open(configFile, 'rb') as cfile: data = json.load(cfile) labels = data['targetFeature'] # 
----------------------------------------------------------------------------# for x in labels: if x in list(test_data.columns): test_data.drop(x,axis='columns', inplace=True) # ----------------------------------------------------------------------------# #test_data = test_data.sample(n=subsample_size, random_state=0) #print(test_data) #test_data_nolab = test_data.drop(columns=labels) #test_data_nolab.head() test_data_nolab = test_data # ----------------------------------------------------------------------------# # Load the trained model from where it's stored # ----------------------------------------------------------------------------# model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'ModelPath') multi_predictor = MultilabelPredictor.load(model_path) # ----------------------------------------------------------------------------# # Start the prediction and perform the evaluation # ----------------------------------------------------------------------------# predictions = multi_predictor.predict(test_data_nolab) for label in labels: df[label+'_predict'] = predictions[label] #evaluations = multi_predictor.evaluate(test_data) #print(evaluations) #print("Evaluated using metrics:", multi_predictor.eval_metrics) # ----------------------------------------------------------------------------# # ----------------------------------------------------------------------------# #outputobj = output_format() #output = outputobj.apply_output_format(df0,output) outputjson = df.to_json(orient='records') outputjson = {"status":"SUCCESS","data":json.loads(outputjson)} output = json.dumps(outputjson) print("predictions:",output) return(output) # ----------------------------------------------------------------------------# except KeyError as e: output = {"status":"FAIL","message":str(e).strip('"')} print("predictions:",json.dumps(output)) return (json.dumps(output)) except Exception as e: output = {"status":"FAIL","message":str(e).strip('"')} print("predictions:",json.dumps(output)) return (json.dumps(output)) if __name__ == "__main__": output = predict(sys.argv[1])
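The MultilabelPredictor docstring above describes fitting one TabularPredictor per label and predicting the labels one after another. A minimal usage sketch under assumed file and column names (train.csv, test.csv, label_a, label_b are all hypothetical); note that, as written, the correlated-fit branch of fit() references an undefined name labels (presumably self.labels was intended), so this sketch sets consider_labels_correlation=False to stay on the independent path:

# Hypothetical usage of the MultilabelPredictor class defined above.
from autogluon.tabular import TabularDataset

labels = ['label_a', 'label_b']                        # hypothetical label columns
train_data = TabularDataset('train.csv')               # must contain both label columns
predictor = MultilabelPredictor(labels=labels,
                                path='ag_multilabel_demo',
                                problem_types=['binary', 'regression'],
                                consider_labels_correlation=False)
predictor.fit(train_data, time_limit=60)                # fits one TabularPredictor per label

test_data = TabularDataset('test.csv')
predictions = predictor.predict(test_data)              # DataFrame with one column per label
print(predictions.head())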
__init__.py
null
regression_metrics.py
import numpy as np from scipy.stats import norm from sklearn.metrics import mean_squared_error, r2_score from ..utils.misc import fitted_ucc_w_nullref def picp(y_true, y_lower, y_upper): """ Prediction Interval Coverage Probability (PICP). Computes the fraction of samples for which the grounds truth lies within predicted interval. Measures the prediction interval calibration for regression. Args: y_true: Ground truth y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: the fraction of samples for which the grounds truth lies within predicted interval. """ satisfies_upper_bound = y_true <= y_upper satisfies_lower_bound = y_true >= y_lower return np.mean(satisfies_upper_bound * satisfies_lower_bound) def mpiw(y_lower, y_upper): """ Mean Prediction Interval Width (MPIW). Computes the average width of the the prediction intervals. Measures the sharpness of intervals. Args: y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: the average width the prediction interval across samples. """ return np.mean(np.abs(y_lower - y_upper)) def auucc_gain(y_true, y_mean, y_lower, y_upper): """ Computes the Area Under the Uncertainty Characteristics Curve (AUUCC) gain wrt to a null reference with constant band. Args: y_true: Ground truth y_mean: predicted mean y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: AUUCC gain """ u = fitted_ucc_w_nullref(y_true, y_mean, y_lower, y_upper) auucc = u.get_AUUCC() assert(isinstance(auucc, list) and len(auucc) == 2), "Failed to calculate auucc gain" assert (not np.isclose(auucc[1], 0.)), "Failed to calculate auucc gain" auucc_gain = (auucc[1]-auucc[0])/auucc[0] return auucc_gain def negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper): """ Computes Gaussian negative_log_likelihood assuming symmetric band around the mean. Args: y_true: Ground truth y_mean: predicted mean y_lower: predicted lower bound y_upper: predicted upper bound Returns: float: nll """ y_std = (y_upper - y_lower) / 4.0 nll = np.mean(-norm.logpdf(y_true.squeeze(), loc=y_mean.squeeze(), scale=y_std.squeeze())) return nll def compute_regression_metrics(y_true, y_mean, y_lower, y_upper, option="all", nll_fn=None): """ Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes the ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] metrics. Args: y_true: Ground truth y_mean: predicted mean y_lower: predicted lower bound y_upper: predicted upper bound option: string or list of string contained the name of the metrics to be computed. nll_fn: function that evaluates NLL, if None, then computes Gaussian NLL using y_mean and y_lower. Returns: dict: dictionary containing the computed metrics. 
""" assert y_true.shape == y_mean.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_mean.shape) assert y_true.shape == y_lower.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_lower.shape) assert y_true.shape == y_upper.shape, "y_true shape: {}, y_mean shape: {}".format(y_true.shape, y_upper.shape) results = {} if not isinstance(option, list): if option == "all": option_list = ["rmse", "nll", "auucc_gain", "picp", "mpiw", "r2"] else: option_list = [option] if "rmse" in option_list: results["rmse"] = mean_squared_error(y_true, y_mean, squared=False) if "nll" in option_list: if nll_fn is None: nll = negative_log_likelihood_Gaussian(y_true, y_mean, y_lower, y_upper) results["nll"] = nll else: results["nll"] = np.mean(nll_fn(y_true)) if "auucc_gain" in option_list: gain = auucc_gain(y_true, y_mean, y_lower, y_upper) results["auucc_gain"] = gain if "picp" in option_list: results["picp"] = picp(y_true, y_lower, y_upper) if "mpiw" in option_list: results["mpiw"] = mpiw(y_lower, y_upper) if "r2" in option_list: results["r2"] = r2_score(y_true, y_mean) return results def _check_not_tuple_of_2_elements(obj, obj_name='obj'): """Check object is not tuple or does not have 2 elements.""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError('%s must be a tuple of 2 elements.' % obj_name) def plot_uncertainty_distribution(dist, show_quantile_dots=False, qd_sample=20, qd_bins=7, ax=None, figsize=None, dpi=None, title='Predicted Distribution', xlims=None, xlabel='Prediction', ylabel='Density', **kwargs): """ Plot the uncertainty distribution for a single distribution. Args: dist: scipy.stats._continuous_distns. A scipy distribution object. show_quantile_dots: boolean. Whether to show quantil dots on top of the density plot. qd_sample: int. Number of dots for the quantile dot plot. qd_bins: int. Number of bins for the quantile dot plot. ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. figsize: tuple of 2 elements or None, optional (default=None). Figure size. dpi : int or None, optional (default=None). Resolution of the figure. title : string or None, optional (default=Prediction Distribution) Axes title. If None, title is disabled. xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. xlabel : string or None, optional (default=Prediction) X-axis title label. If None, title is disabled. ylabel : string or None, optional (default=Density) Y-axis title label. If None, title is disabled. Returns: matplotlib.axes.Axes: ax : The plot with prediction distribution. 
""" import matplotlib.pyplot as plt if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) x = np.linspace(dist.ppf(0.01), dist.ppf(0.99), 100) ax.plot(x, dist.pdf(x), **kwargs) if show_quantile_dots: from matplotlib.patches import Circle from matplotlib.collections import PatchCollection import matplotlib.ticker as ticker data = dist.rvs(size=10000) p_less_than_x = np.linspace(1 / qd_sample / 2, 1 - (1 / qd_sample / 2), qd_sample) x_ = np.percentile(data, p_less_than_x * 100) # Inverce CDF (ppf) # Create bins hist = np.histogram(x_, bins=qd_bins) bins, edges = hist radius = (edges[1] - edges[0]) / 2 ax2 = ax.twinx() patches = [] max_y = 0 for i in range(qd_bins): x_bin = (edges[i + 1] + edges[i]) / 2 y_bins = [(i + 1) * (radius * 2) for i in range(bins[i])] max_y = max(y_bins) if max(y_bins) > max_y else max_y for _, y_bin in enumerate(y_bins): circle = Circle((x_bin, y_bin), radius) patches.append(circle) p = PatchCollection(patches, alpha=0.4) ax2.add_collection(p) # Axis tweek y_scale = (max_y + radius) / max(dist.pdf(x)) ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x_ / y_scale)) ax2.yaxis.set_major_formatter(ticks_y) ax2.set_yticklabels([]) if xlims is not None: ax2.set_xlim(left=xlims[0], right=xlims[1]) else: ax2.set_xlim([min(x_) - radius, max(x) + radius]) ax2.set_ylim([0, max_y + radius]) ax2.set_aspect(1) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) return ax def plot_picp_by_feature(x_test, y_test, y_test_pred_lower_total, y_test_pred_upper_total, num_bins=10, ax=None, figsize=None, dpi=None, xlims=None, ylims=None, xscale="linear", title=None, xlabel=None, ylabel=None): """ Plot how prediction uncertainty varies across the entire range of a feature. Args: x_test: One dimensional ndarray. Feature column of the test dataset. y_test: One dimensional ndarray. Ground truth label of the test dataset. y_test_pred_lower_total: One dimensional ndarray. Lower bound of the total uncertainty range. y_test_pred_upper_total: One dimensional ndarray. Upper bound of the total uncertainty range. num_bins: int. Number of bins used to discritize x_test into equal-sample-sized bins. ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. figsize: tuple of 2 elements or None, optional (default=None). Figure size. dpi : int or None, optional (default=None). Resolution of the figure. xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. ylims: tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.ylim()``. xscale: Passed to ``ax.set_xscale()``. title : string or None, optional Axes title. If None, title is disabled. xlabel : string or None, optional X-axis title label. If None, title is disabled. ylabel : string or None, optional Y-axis title label. If None, title is disabled. Returns: matplotlib.axes.Axes: ax : The plot with PICP scores binned by a feature. 
""" from scipy.stats.mstats import mquantiles import matplotlib.pyplot as plt if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) x_uniques_sorted = np.sort(np.unique(x_test)) num_unique = len(x_uniques_sorted) sample_bin_ids = np.searchsorted(x_uniques_sorted, x_test) if len(x_uniques_sorted) > 10: # bin the values q_bins = mquantiles(x_test, np.histogram_bin_edges([], bins=num_bins-1, range=(0.0, 1.0))[1:]) q_sample_bin_ids = np.digitize(x_test, q_bins) picps = np.array([picp(y_test[q_sample_bin_ids==bin], y_test_pred_lower_total[q_sample_bin_ids==bin], y_test_pred_upper_total[q_sample_bin_ids==bin]) for bin in range(num_bins)]) unique_sample_bin_ids = np.digitize(x_uniques_sorted, q_bins) picp_replicated = [len(x_uniques_sorted[unique_sample_bin_ids == bin]) * [picps[bin]] for bin in range(num_bins)] picp_replicated = np.array([item for sublist in picp_replicated for item in sublist]) else: picps = np.array([picp(y_test[sample_bin_ids == bin], y_test_pred_lower_total[sample_bin_ids == bin], y_test_pred_upper_total[sample_bin_ids == bin]) for bin in range(num_unique)]) picp_replicated = picps ax.plot(x_uniques_sorted, picp_replicated, label='PICP') ax.axhline(0.95, linestyle='--', label='95%') ax.set_ylabel('PICP') ax.legend(loc='best') if title is None: title = 'Test data overall PICP: {:.2f} MPIW: {:.2f}'.format( picp(y_test, y_test_pred_lower_total, y_test_pred_upper_total), mpiw(y_test_pred_lower_total, y_test_pred_upper_total)) if xlims is not None: ax.set_xlim(left=xlims[0], right=xlims[1]) if ylims is not None: ax.set_ylim(bottom=ylims[0], top=ylims[1]) ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) if xscale is not None: ax.set_xscale(xscale) return ax def plot_uncertainty_by_feature(x_test, y_test_pred_mean, y_test_pred_lower_total, y_test_pred_upper_total, y_test_pred_lower_epistemic=None, y_test_pred_upper_epistemic=None, ax=None, figsize=None, dpi=None, xlims=None, xscale="linear", title=None, xlabel=None, ylabel=None): """ Plot how prediction uncertainty varies across the entire range of a feature. Args: x_test: one dimensional ndarray. Feature column of the test dataset. y_test_pred_mean: One dimensional ndarray. Model prediction for the test dataset. y_test_pred_lower_total: One dimensional ndarray. Lower bound of the total uncertainty range. y_test_pred_upper_total: One dimensional ndarray. Upper bound of the total uncertainty range. y_test_pred_lower_epistemic: One dimensional ndarray. Lower bound of the epistemic uncertainty range. y_test_pred_upper_epistemic: One dimensional ndarray. Upper bound of the epistemic uncertainty range. ax: matplotlib.axes.Axes or None, optional (default=None). Target axes instance. If None, new figure and axes will be created. figsize: tuple of 2 elements or None, optional (default=None). Figure size. dpi : int or None, optional (default=None). Resolution of the figure. xlims : tuple of 2 elements or None, optional (default=None). Tuple passed to ``ax.xlim()``. xscale: Passed to ``ax.set_xscale()``. title : string or None, optional Axes title. If None, title is disabled. xlabel : string or None, optional X-axis title label. If None, title is disabled. ylabel : string or None, optional Y-axis title label. If None, title is disabled. Returns: matplotlib.axes.Axes: ax : The plot with model's uncertainty binned by a feature. 
""" import matplotlib.pyplot as plt if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) x_uniques_sorted = np.sort(np.unique(x_test)) y_pred_var = ((y_test_pred_upper_total - y_test_pred_lower_total) / 4.0)**2 agg_y_std = np.array([np.sqrt(np.mean(y_pred_var[x_test==x])) for x in x_uniques_sorted]) agg_y_mean = np.array([np.mean(y_test_pred_mean[x_test==x]) for x in x_uniques_sorted]) ax.plot(x_uniques_sorted, agg_y_mean, '-b', lw=2, label='mean prediction') ax.fill_between(x_uniques_sorted, agg_y_mean - 2.0 * agg_y_std, agg_y_mean + 2.0 * agg_y_std, alpha=0.3, label='total uncertainty') if y_test_pred_lower_epistemic is not None: y_pred_var_epistemic = ((y_test_pred_upper_epistemic - y_test_pred_lower_epistemic) / 4.0)**2 agg_y_std_epistemic = np.array([np.sqrt(np.mean(y_pred_var_epistemic[x_test==x])) for x in x_uniques_sorted]) ax.fill_between(x_uniques_sorted, agg_y_mean - 2.0 * agg_y_std_epistemic, agg_y_mean + 2.0 * agg_y_std_epistemic, alpha=0.3, label='model uncertainty') ax.legend(loc='best') if xlims is not None: ax.set_xlim(left=xlims[0], right=xlims[1]) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) if xscale is not None: ax.set_xscale(xscale) return ax
classification_metrics.py
import numpy as np import pandas as pd from scipy.stats import entropy from sklearn.metrics import roc_auc_score, log_loss, accuracy_score def entropy_based_uncertainty_decomposition(y_prob_samples): """ Entropy based decomposition [2]_ of predictive uncertainty into aleatoric and epistemic components. References: .. [2] Depeweg, S., Hernandez-Lobato, J. M., Doshi-Velez, F., & Udluft, S. (2018, July). Decomposition of uncertainty in Bayesian deep learning for efficient and risk-sensitive learning. In International Conference on Machine Learning (pp. 1184-1193). PMLR. Args: y_prob_samples: list of array-like of shape (n_samples, n_classes) containing class prediction probabilities corresponding to samples from the model posterior. Returns: tuple: - total_uncertainty: entropy of the predictive distribution. - aleatoric_uncertainty: aleatoric component of the total_uncertainty. - epistemic_uncertainty: epistemic component of the total_uncertainty. """ y_preds_samples_stacked = np.stack(y_prob_samples) preds_mean = np.mean(y_preds_samples_stacked, 0) total_uncertainty = entropy(preds_mean, axis=1) aleatoric_uncertainty = np.mean( np.concatenate([entropy(y_pred, axis=1).reshape(-1, 1) for y_pred in y_prob_samples], axis=1), axis=1) epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty return total_uncertainty, aleatoric_uncertainty, epistemic_uncertainty def multiclass_brier_score(y_true, y_prob): """Brier score for multi-class. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. Returns: float: Brier score. """ assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" y_target = np.zeros_like(y_prob) y_target[:, y_true] = 1.0 return np.mean(np.sum((y_target - y_prob) ** 2, axis=1)) def area_under_risk_rejection_rate_curve(y_true, y_prob, y_pred=None, selection_scores=None, risk_func=accuracy_score, attributes=None, num_bins=10, subgroup_ids=None, return_counts=False): """ Computes risk vs rejection rate curve and the area under this curve. Similar to risk-coverage curves [3]_ where coverage instead of rejection rate is used. References: .. [3] Franc, Vojtech, and Daniel Prusa. "On discriminative learning of prediction uncertainty." In International Conference on Machine Learning, pp. 1963-1971. 2019. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like of shape (n_samples,) predicted labels. selection_scores: scores corresponding to certainty in the predicted labels. risk_func: risk function under consideration. attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. num_bins: number of bins. subgroup_ids: (optional) selectively compute risk on a subgroup of the samples specified by subgroup_ids. return_counts: set to True to return counts also. Returns: float or tuple: - aurrrc (float): area under risk rejection rate curve. - rejection_rates (list): rejection rates for each bin (returned only if return_counts is True). - selection_thresholds (list): selection threshold for each bin (returned only if return_counts is True). - risks (list): risk in each bin (returned only if return_counts is True). 
""" if selection_scores is None: assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" selection_scores = y_prob[np.arange(y_prob.shape[0]), np.argmax(y_prob, axis=1)] if y_pred is None: assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" y_pred = np.argmax(y_prob, axis=1) order = np.argsort(selection_scores)[::-1] rejection_rates = [] selection_thresholds = [] risks = [] for bin_id in range(num_bins): samples_in_bin = len(y_true) // num_bins selection_threshold = selection_scores[order[samples_in_bin * (bin_id+1)-1]] selection_thresholds.append(selection_threshold) ids = selection_scores >= selection_threshold if sum(ids) > 0: if attributes is None: if isinstance(y_true, pd.Series): y_true_numpy = y_true.values else: y_true_numpy = y_true if subgroup_ids is None: risk_value = 1.0 - risk_func(y_true_numpy[ids], y_pred[ids]) else: if sum(subgroup_ids & ids) > 0: risk_value = 1.0 - risk_func(y_true_numpy[subgroup_ids & ids], y_pred[subgroup_ids & ids]) else: risk_value = 0.0 else: risk_value = risk_func(y_true.iloc[ids], y_pred[ids], prot_attr=attributes) else: risk_value = 0.0 risks.append(risk_value) rejection_rates.append(1.0 - 1.0 * sum(ids) / len(y_true)) aurrrc = np.nanmean(risks) if not return_counts: return aurrrc else: return aurrrc, rejection_rates, selection_thresholds, risks def expected_calibration_error(y_true, y_prob, y_pred=None, num_bins=10, return_counts=False): """ Computes the reliability curve and the expected calibration error [1]_ . References: .. [1] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger; Proceedings of the 34th International Conference on Machine Learning, PMLR 70:1321-1330, 2017. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like of shape (n_samples,) predicted labels. num_bins: number of bins. return_counts: set to True to return counts also. Returns: float or tuple: - ece (float): expected calibration error. - confidences_in_bins: average confidence in each bin (returned only if return_counts is True). - accuracies_in_bins: accuracy in each bin (returned only if return_counts is True). - frac_samples_in_bins: fraction of samples in each bin (returned only if return_counts is True). 
""" assert len(y_prob.shape) > 1, "y_prob should be array-like of shape (n_samples, n_classes)" num_samples, num_classes = y_prob.shape top_scores = np.max(y_prob, axis=1) if y_pred is None: y_pred = np.argmax(y_prob, axis=1) if num_classes == 2: bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.5, 1.0)) else: bins_edges = np.histogram_bin_edges([], bins=num_bins, range=(0.0, 1.0)) non_boundary_bin_edges = bins_edges[1:-1] bin_centers = (bins_edges[1:] + bins_edges[:-1])/2 sample_bin_ids = np.digitize(top_scores, non_boundary_bin_edges) num_samples_in_bins = np.zeros(num_bins) accuracies_in_bins = np.zeros(num_bins) confidences_in_bins = np.zeros(num_bins) for bin in range(num_bins): num_samples_in_bins[bin] = len(y_pred[sample_bin_ids == bin]) if num_samples_in_bins[bin] > 0: accuracies_in_bins[bin] = np.sum(y_true[sample_bin_ids == bin] == y_pred[sample_bin_ids == bin]) / num_samples_in_bins[bin] confidences_in_bins[bin] = np.sum(top_scores[sample_bin_ids == bin]) / num_samples_in_bins[bin] ece = np.sum( num_samples_in_bins * np.abs(accuracies_in_bins - confidences_in_bins) / num_samples ) frac_samples_in_bins = num_samples_in_bins / num_samples if not return_counts: return ece else: return ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bin_centers def compute_classification_metrics(y_true, y_prob, option='all'): """ Computes the metrics specified in the option which can be string or a list of strings. Default option `all` computes the [aurrrc, ece, auroc, nll, brier, accuracy] metrics. Args: y_true: array-like of shape (n_samples,) ground truth labels. y_prob: array-like of shape (n_samples, n_classes). Probability scores from the base model. option: string or list of string contained the name of the metrics to be computed. Returns: dict: a dictionary containing the computed metrics. """ results = {} if not isinstance(option, list): if option == "all": option_list = ["aurrrc", "ece", "auroc", "nll", "brier", "accuracy"] else: option_list = [option] if "aurrrc" in option_list: results["aurrrc"] = area_under_risk_rejection_rate_curve(y_true=y_true, y_prob=y_prob) if "ece" in option_list: results["ece"] = expected_calibration_error(y_true=y_true, y_prob=y_prob) if "auroc" in option_list: results["auroc"], _ = roc_auc_score(y_true=y_true, y_score=y_prob) if "nll" in option_list: results["nll"] = log_loss(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) if "brier" in option_list: results["brier"] = multiclass_brier_score(y_true=y_true, y_prob=y_prob) if "accuracy" in option_list: results["accuracy"] = accuracy_score(y_true=y_true, y_pred=np.argmax(y_prob, axis=1)) return results def plot_reliability_diagram(y_true, y_prob, y_pred, plot_label=[""], num_bins=10): """ Plots the reliability diagram showing the calibration error for different confidence scores. Multiple curves can be plot by passing data as lists. Args: y_true: array-like or or a list of array-like of shape (n_samples,) ground truth labels. y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like or or a list of array-like of shape (n_samples,) predicted labels. plot_label: (optional) list of names identifying each curve. num_bins: number of bins. Returns: tuple: - ece_list: ece: list containing expected calibration error for each curve. - accuracies_in_bins_list: list containing binned average accuracies for each curve. - frac_samples_in_bins_list: list containing binned sample frequencies for each curve. 
- confidences_in_bins_list: list containing binned average confidence for each curve. """ import matplotlib.pyplot as plt if not isinstance(y_true, list): y_true, y_prob, y_pred = [y_true], [y_prob], [y_pred] if len(plot_label) != len(y_true): raise ValueError('y_true and plot_label should be of same length.') ece_list = [] accuracies_in_bins_list = [] frac_samples_in_bins_list = [] confidences_in_bins_list = [] for idx in range(len(plot_label)): ece, confidences_in_bins, accuracies_in_bins, frac_samples_in_bins, bins = expected_calibration_error(y_true[idx], y_prob[idx], y_pred[idx], num_bins=num_bins, return_counts=True) ece_list.append(ece) accuracies_in_bins_list.append(accuracies_in_bins) frac_samples_in_bins_list.append(frac_samples_in_bins) confidences_in_bins_list.append(confidences_in_bins) fig = plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) for idx in range(len(plot_label)): plt.plot(bins, frac_samples_in_bins_list[idx], 'o-', label=plot_label[idx]) plt.title("Confidence Histogram") plt.xlabel("Confidence") plt.ylabel("Fraction of Samples") plt.grid() plt.ylim([0.0, 1.0]) plt.legend() plt.subplot(1, 2, 2) for idx in range(len(plot_label)): plt.plot(bins, accuracies_in_bins_list[idx], 'o-', label="{} ECE = {:.2f}".format(plot_label[idx], ece_list[idx])) plt.plot(np.linspace(0, 1, 50), np.linspace(0, 1, 50), 'b.', label="Perfect Calibration") plt.title("Reliability Plot") plt.xlabel("Confidence") plt.ylabel("Accuracy") plt.grid() plt.legend() plt.show() return ece_list, accuracies_in_bins_list, frac_samples_in_bins_list, confidences_in_bins_list def plot_risk_vs_rejection_rate(y_true, y_prob, y_pred, selection_scores=None, plot_label=[""], risk_func=None, attributes=None, num_bins=10, subgroup_ids=None): """ Plots the risk vs rejection rate curve showing the risk for different rejection rates. Multiple curves can be plot by passing data as lists. Args: y_true: array-like or or a list of array-like of shape (n_samples,) ground truth labels. y_prob: array-like or or a list of array-like of shape (n_samples, n_classes). Probability scores from the base model. y_pred: array-like or or a list of array-like of shape (n_samples,) predicted labels. selection_scores: ndarray or a list of ndarray containing scores corresponding to certainty in the predicted labels. risk_func: risk function under consideration. attributes: (optional) if risk function is a fairness metric also pass the protected attribute name. num_bins: number of bins. subgroup_ids: (optional) ndarray or a list of ndarray containing subgroup_ids to selectively compute risk on a subgroup of the samples specified by subgroup_ids. Returns: tuple: - aurrrc_list: list containing the area under risk rejection rate curves. - rejection_rate_list: list containing the binned rejection rates. - selection_thresholds_list: list containing the binned selection thresholds. - risk_list: list containing the binned risks. 
""" import matplotlib.pyplot as plt if not isinstance(y_true, list): y_true, y_prob, y_pred, selection_scores, subgroup_ids = [y_true], [y_prob], [y_pred], [selection_scores], [subgroup_ids] if len(plot_label) != len(y_true): raise ValueError('y_true and plot_label should be of same length.') aurrrc_list = [] rejection_rate_list = [] risk_list = [] selection_thresholds_list = [] for idx in range(len(plot_label)): aursrc, rejection_rates, selection_thresholds, risks = area_under_risk_rejection_rate_curve( y_true[idx], y_prob[idx], y_pred[idx], selection_scores=selection_scores[idx], risk_func=risk_func, attributes=attributes, num_bins=num_bins, subgroup_ids=subgroup_ids[idx], return_counts=True ) aurrrc_list.append(aursrc) rejection_rate_list.append(rejection_rates) risk_list.append(risks) selection_thresholds_list.append(selection_thresholds) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) for idx in range(len(plot_label)): plt.plot(rejection_rate_list[idx], risk_list[idx], label="{} AURRRC={:.5f}".format(plot_label[idx], aurrrc_list[idx])) plt.legend(loc="best") plt.xlabel("Rejection Rate") if risk_func is None: ylabel = "Prediction Error Rate" else: if 'accuracy' in risk_func.__name__: ylabel = "1.0 - " + risk_func.__name__ else: ylabel = risk_func.__name__ plt.ylabel(ylabel) plt.title("Risk vs Rejection Rate Plot") plt.grid() plt.subplot(1, 2, 2) for idx in range(len(plot_label)): plt.plot(selection_thresholds_list[idx], risk_list[idx], label="{}".format(plot_label[idx])) plt.legend(loc="best") plt.xlabel("Selection Threshold") if risk_func is None: ylabel = "Prediction Error Rate" else: if 'accuracy' in risk_func.__name__: ylabel = "1.0 - " + risk_func.__name__ else: ylabel = risk_func.__name__ plt.ylabel(ylabel) plt.title("Risk vs Selection Threshold Plot") plt.grid() plt.show() return aurrrc_list, rejection_rate_list, selection_thresholds_list, risk_list
__init__.py
from .classification_metrics import expected_calibration_error, area_under_risk_rejection_rate_curve, \
    compute_classification_metrics, entropy_based_uncertainty_decomposition
from .regression_metrics import picp, mpiw, compute_regression_metrics, plot_uncertainty_distribution, \
    plot_uncertainty_by_feature, plot_picp_by_feature
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
__init__.py
from .uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve
uncertainty_characteristics_curve.py
from copy import deepcopy import matplotlib.pyplot as plt import numpy as np from scipy.integrate import simps, trapz from sklearn.isotonic import IsotonicRegression DEFAULT_X_AXIS_NAME = 'excess' DEFAULT_Y_AXIS_NAME = 'missrate' class UncertaintyCharacteristicsCurve: """ Class with main functions of the Uncertainty Characteristics Curve (UCC). """ def __init__(self, normalize=True, precompute_bias_data=True): """ :param normalize: set initial axes normalization flag (can be changed via set_coordinates()) :param precompute_bias_data: if True, fit() will compute statistics necessary to generate bias-based UCCs (in addition to the scale-based ones). Skipping this precomputation may speed up the fit() call if bias-based UCC is not needed. """ self.axes_name2idx = {"missrate": 1, "bandwidth": 2, "excess": 3, "deficit": 4} self.axes_idx2descr = {1: "Missrate", 2: "Bandwidth", 3: "Excess", 4: "Deficit"} self.x_axis_idx = None self.y_axis_idx = None self.norm_x_axis = False self.norm_y_axis = False self.std_unit = None self.normalize = normalize self.d = None self.gt = None self.lb = None self.ub = None self.precompute_bias_data = precompute_bias_data self.set_coordinates(x_axis_name=DEFAULT_X_AXIS_NAME, y_axis_name=DEFAULT_Y_AXIS_NAME, normalize=normalize) def set_coordinates(self, x_axis_name=None, y_axis_name=None, normalize=None): """ Assigns user-specified type to the axes and normalization behavior (sticky). :param x_axis_name: None-> unchanged, or name from self.axes_name2idx :param y_axis_name: ditto :param normalize: True/False will activate/deactivate norming for specified axes. Behavior for Axes_name that are None will not be changed. Value None will leave norm status unchanged. Note, axis=='missrate' will never get normalized, even with normalize == True :return: none """ normalize = self.normalize if normalize is None else normalize if x_axis_name is None and self.x_axis_idx is None: raise ValueError("ERROR(UCC): x-axis has not been defined.") if y_axis_name is None and self.y_axis_idx is None: raise ValueError("ERROR(UCC): y-axis has not been defined.") if x_axis_name is None and y_axis_name is None and normalize is not None: # just set normalization on/off for both axes and return self.norm_x_axis = False if x_axis_name == 'missrate' else normalize self.norm_y_axis = False if y_axis_name == 'missrate' else normalize return if x_axis_name is not None: self.x_axis_idx = self.axes_name2idx[x_axis_name] self.norm_x_axis = False if x_axis_name == 'missrate' else normalize if y_axis_name is not None: self.y_axis_idx = self.axes_name2idx[y_axis_name] self.norm_y_axis = False if y_axis_name == 'missrate' else normalize def set_std_unit(self, std_unit=None): """ Sets the UCC's unit to be used when displaying normalized axes. :param std_unit: if None, the unit will be calculated as stddev of the ground truth data (ValueError raised if data has not been set at this point) or set to the user-specified value. :return: """ if std_unit is None: # set it to stddev of data if self.gt is None: raise ValueError("ERROR(UCC): No data specified - cannot set stddev unit.") self.std_unit = np.std(self.gt) if np.isclose(self.std_unit, 0.): print("WARN(UCC): data-based stddev is zero - resetting axes unit to 1.") self.std_unit = 1. else: self.std_unit = float(std_unit) def fit(self, X, gt): """ Calculates internal arrays necessary for other methods (plotting, auc, cost minimization). Re-entrant. :param X: [numsamples, 3] numpy matrix, or list of numpy matrices. 
Col 1: predicted values Col 2: lower band (deviate) wrt predicted value (always positive) Col 3: upper band wrt predicted value (always positive) If list is provided, all methods will output corresponding metrics as lists as well! :param gt: Ground truth array (i.e.,the 'actual' values corresponding to predictions in X :return: self """ if not isinstance(X, list): X = [X] newX = [] for x in X: assert (isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[1] == 3 and x.shape[0] == len(gt)) newX.append(self._sanitize_input(x)) self.d = [gt - x[:, 0] for x in newX] self.lb = [x[:, 1] for x in newX] self.ub = [x[:, 2] for x in newX] self.gt = gt self.set_std_unit() self.plotdata_for_scale = [] self.plotdata_for_bias = [] # precompute plotdata: for i in range(len(self.d)): self.plotdata_for_scale.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=False)) if self.precompute_bias_data: self.plotdata_for_bias.append(self._calc_plotdata(self.d[i], self.lb[i], self.ub[i], vary_bias=True)) return self def minimize_cost(self, x_axis_cost=.5, y_axis_cost=.5, augment_cost_by_normfactor=True, search=('scale', 'bias')): """ Find minima of a linear cost function for each component. Cost function C = x_axis_cost * x_axis_value + y_axis_cost * y_axis_value. A minimum can occur in the scale-based or bias-based UCC (this can be constrained by the 'search' arg). The function returns a 'recipe' how to achieve the corresponding minimum, for each component. :param x_axis_cost: weight of one unit on x_axis :param y_axis_cost: weight of one unit on y_axis :param augment_cost_by_normfactor: when False, the cost multipliers will apply as is. If True, they will be pre-normed by the corresponding axis norm (where applicable), to account for range differences between axes. :param search: list of types over which minimization is to be performed, valid elements are 'scale' and 'bias'. :return: list of dicts - one per component, or a single dict, if there is only one component. Dict keys are - 'operation': can be 'bias' (additive) or 'scale' (multiplicative), 'modvalue': value to multiply by or to add to error bars to achieve the minimum, 'new_x'/'new_y': new coordinates (operating point) with that minimum, 'cost': new cost at minimum point, 'original_cost': original cost (original operating point). """ if self.d is None: raise ValueError("ERROR(UCC): call fit() prior to using this method.") if augment_cost_by_normfactor: if self.norm_x_axis: x_axis_cost /= self.std_unit if self.norm_y_axis: y_axis_cost /= self.std_unit print("INFO(UCC): Pre-norming costs by corresp. 
std deviation: new x_axis_cost = %.4f, y_axis_cost = %.4f" % (x_axis_cost, y_axis_cost)) if isinstance(search, tuple): search = list(search) if not isinstance(search, list): search = [search] min_costs = [] for d in range(len(self.d)): # original OP cost m, b, e, df = self._calc_missrate_bandwidth_excess_deficit(self.d[d], self.lb[d], self.ub[d]) original_cost = x_axis_cost * [0., m, b, e, df][self.x_axis_idx] + y_axis_cost * [0., m, b, e, df][ self.y_axis_idx] plotdata = self.plotdata_for_scale[d] cost_scale, minidx_scale = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx, x_axis_cost, y_axis_cost) mcf_scale_multiplier = plotdata[minidx_scale][0] mcf_scale_x = plotdata[minidx_scale][self.x_axis_idx] mcf_scale_y = plotdata[minidx_scale][self.y_axis_idx] if 'bias' in search: if not self.precompute_bias_data: raise ValueError( "ERROR(UCC): Cannot perform minimization - instantiated without bias data computation") plotdata = self.plotdata_for_bias[d] cost_bias, minidx_bias = self._find_min_cost_in_component(plotdata, self.x_axis_idx, self.y_axis_idx, x_axis_cost, y_axis_cost) mcf_bias_add = plotdata[minidx_bias][0] mcf_bias_x = plotdata[minidx_bias][self.x_axis_idx] mcf_bias_y = plotdata[minidx_bias][self.y_axis_idx] if 'bias' in search and 'scale' in search: if cost_bias < cost_scale: min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add, 'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost}) else: min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier, 'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost}) elif 'scale' in search: min_costs.append({'operation': 'scale', 'cost': cost_scale, 'modvalue': mcf_scale_multiplier, 'new_x': mcf_scale_x, 'new_y': mcf_scale_y, 'original_cost': original_cost}) elif 'bias' in search: min_costs.append({'operation': 'bias', 'cost': cost_bias, 'modvalue': mcf_bias_add, 'new_x': mcf_bias_x, 'new_y': mcf_bias_y, 'original_cost': original_cost}) else: raise ValueError("(ERROR): Unknown search element (%s) requested." % ",".join(search)) if len(min_costs) < 2: return min_costs[0] else: return min_costs def get_specific_operating_point(self, req_x_axis_value=None, req_y_axis_value=None, req_critical_value=None, vary_bias=False): """ Finds corresponding operating point on the current UCC, given a point on either x or y axis. Returns a list of recipes how to achieve the point (x,y), for each component. If there is only one component, returns a single recipe dict. :param req_x_axis_value: requested x value on UCC (normalization status is taken from current display) :param req_y_axis_value: requested y value on UCC (normalization status is taken from current display) :param vary_bias: set to True when referring to bias-induced UCC (scale UCC default) :return: list of dicts (recipes), or a single dict """ if self.d is None: raise ValueError("ERROR(UCC): call fit() prior to using this method.") if np.sum([req_x_axis_value is not None, req_y_axis_value is not None, req_critical_value is not None]) != 1: raise ValueError("ERROR(UCC): exactly one axis value must be requested at a time.") if vary_bias and not self.precompute_bias_data: raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation") xnorm = self.std_unit if self.norm_x_axis else 1. ynorm = self.std_unit if self.norm_y_axis else 1. 
recipe = [] for dc in range(len(self.d)): plotdata = self.plotdata_for_bias[dc] if vary_bias else self.plotdata_for_scale[dc] if req_x_axis_value is not None: tgtidx = self.x_axis_idx req_value = req_x_axis_value * xnorm elif req_y_axis_value is not None: tgtidx = self.y_axis_idx req_value = req_y_axis_value * ynorm elif req_critical_value is not None: req_value = req_critical_value tgtidx = 0 # first element in plotdata is always the critical value (scale of bias) else: raise RuntimeError("Unhandled case") closestidx = np.argmin(np.asarray([np.abs(p[tgtidx] - req_value) for p in plotdata])) recipe.append({'operation': ('bias' if vary_bias else 'scale'), 'modvalue': plotdata[closestidx][0], 'new_x': plotdata[closestidx][self.x_axis_idx] / xnorm, 'new_y': plotdata[closestidx][self.y_axis_idx] / ynorm}) if len(recipe) < 2: return recipe[0] else: return recipe def _find_min_cost_in_component(self, plotdata, idx1, idx2, cost1, cost2): """ Find s minimum cost function value and corresp. position index in plotdata :param plotdata: liste of tuples :param idx1: idx of x-axis item within the tuple :param idx2: idx of y-axis item within the tuple :param cost1: cost factor for x-axis unit :param cost2: cost factor for y-axis unit :return: min cost value, index within plotdata where minimum occurs """ raw = [cost1 * i[idx1] + cost2 * i[idx2] for i in plotdata] minidx = np.argmin(raw) return raw[minidx], minidx def _sanitize_input(self, x): """ Replaces problematic values in input data (e.g, zero error bars) :param x: single matrix of input data [n, 3] :return: sanitized version of x """ if np.isclose(np.sum(x[:, 1]), 0.): raise ValueError("ERROR(UCC): Provided lower bands are all zero.") if np.isclose(np.sum(x[:, 2]), 0.): raise ValueError("ERROR(UCC): Provided upper bands are all zero.") for i in [1, 2]: if any(np.isclose(x[:, i], 0.)): print("WARN(UCC): some band values are 0. 
- REPLACING with positive minimum") m = np.min(x[x[:, i] > 0, i]) x = np.where(np.isclose(x, 0.), m, x) return x def _calc_avg_excess(self, d, lb, ub): """ Excess is amount an error bar overshoots actual :param d: pred-actual array :param lb: lower band :param ub: upper band :return: average excess over array """ excess = np.zeros(d.shape) posidx = np.where(d >= 0)[0] excess[posidx] = np.where(ub[posidx] - d[posidx] < 0., 0., ub[posidx] - d[posidx]) negidx = np.where(d < 0)[0] excess[negidx] = np.where(lb[negidx] + d[negidx] < 0., 0., lb[negidx] + d[negidx]) return np.mean(excess) def _calc_avg_deficit(self, d, lb, ub): """ Deficit is error bar insufficiency: bar falls short of actual :param d: pred-actual array :param lb: lower band :param ub: upper band :return: average deficit over array """ deficit = np.zeros(d.shape) posidx = np.where(d >= 0)[0] deficit[posidx] = np.where(- ub[posidx] + d[posidx] < 0., 0., - ub[posidx] + d[posidx]) negidx = np.where(d < 0)[0] deficit[negidx] = np.where(- lb[negidx] - d[negidx] < 0., 0., - lb[negidx] - d[negidx]) return np.mean(deficit) def _calc_missrate_bandwidth_excess_deficit(self, d, lb, ub, scale=1.0, bias=0.0): """ Calculates recall at a given scale/bias, average bandwidth and average excess :param d: delta :param lb: lower band :param ub: upper band :param scale: scale * (x + bias) :param bias: :return: miss rate, average bandwidth, avg excess, avg deficit """ abslband = scale * np.where((lb + bias) < 0., 0., lb + bias) absuband = scale * np.where((ub + bias) < 0., 0., ub + bias) recall = np.sum((d >= - abslband) & (d <= absuband)) / len(d) avgbandwidth = np.mean([absuband, abslband]) avgexcess = self._calc_avg_excess(d, abslband, absuband) avgdeficit = self._calc_avg_deficit(d, abslband, absuband) return 1 - recall, avgbandwidth, avgexcess, avgdeficit def _calc_plotdata(self, d, lb, ub, vary_bias=False): """ Generates data necessary for various UCC metrics. :param d: delta (predicted - actual) vector :param ub: upper uncertainty bandwidth (above predicted) :param lb: lower uncertainty bandwidth (below predicted) - all positive (bandwidth) :param vary_bias: True will switch to additive bias instead of scale :return: list. Elements are tuples (varyvalue, missrate, bandwidth, excess, deficit) """ # step 1: collect critical scale or bias values critval = [] for i in range(len(d)): if not vary_bias: if d[i] >= 0: critval.append(d[i] / ub[i]) else: critval.append(-d[i] / lb[i]) else: if d[i] >= 0: critval.append(d[i] - ub[i]) else: critval.append(-lb[i] - d[i]) critval = sorted(critval) plotdata = [] for i in range(len(critval)): if not vary_bias: missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=critval[i]) else: missrate, bandwidth, excess, deficit = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, bias=critval[i]) plotdata.append((critval[i], missrate, bandwidth, excess, deficit)) return plotdata def get_AUUCC(self, vary_bias=False, aucfct="trapz", partial_x=None, partial_y=None): """ returns approximate area under the curve on current coordinates, for each component. :param vary_bias: False == varies scale, True == varies bias :param aucfct: specifies AUC integrator (can be "trapz", "simps") :param partial_x: tuple (x_min, x_max) defining interval on x to calc a a partial AUC. The interval bounds refer to axes as visualized (ie. potentially normed) :param partial_y: tuple (y_min, y_max) defining interval on y to calc a a partial AUC. partial_x must be None. 
:return: list of floats with AUUCCs for each input component, or a single float, if there is only 1 component. """ if self.d is None: raise ValueError("ERROR(UCC): call fit() prior to using this method.") if vary_bias and not self.precompute_bias_data: raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation") if partial_x is not None and partial_y is not None: raise ValueError("ERROR(UCC): partial_x and partial_y can not be specified at the same time.") assert(partial_x is None or (isinstance(partial_x, tuple) and len(partial_x)==2)) assert(partial_y is None or (isinstance(partial_y, tuple) and len(partial_y)==2)) # find starting point (where the x axis value starts to actually change) rv = [] # do this for individual streams xind = self.x_axis_idx aucfct = simps if aucfct == "simps" else trapz for s in range(len(self.d)): plotdata = self.plotdata_for_bias[s] if vary_bias else self.plotdata_for_scale[s] prev = plotdata[0][xind] t = 1 cval = plotdata[t][xind] while cval == prev and t < len(plotdata) - 1: t += 1 prev = cval cval = plotdata[t][xind] startt = t - 1 # from here, it's a valid function endtt = len(plotdata) if startt >= endtt - 2: rvs = 0. # no area else: xnorm = self.std_unit if self.norm_x_axis else 1. ynorm = self.std_unit if self.norm_y_axis else 1. y=[(plotdata[i][self.y_axis_idx]) / ynorm for i in range(startt, endtt)] x=[(plotdata[i][self.x_axis_idx]) / xnorm for i in range(startt, endtt)] if partial_x is not None: from_i = self._find_closest_index(partial_x[0], x) to_i = self._find_closest_index(partial_x[1], x) + 1 elif partial_y is not None: from_i = self._find_closest_index(partial_y[0], y) to_i = self._find_closest_index(partial_y[1], y) if from_i > to_i: # y is in reverse order from_i, to_i = to_i, from_i to_i += 1 # as upper bound in array indexing else: from_i = 0 to_i = len(x) to_i = min(to_i, len(x)) if to_i < from_i: raise ValueError("ERROR(UCC): Failed to find an appropriate partial-AUC interval in the data.") if to_i - from_i < 2: raise RuntimeError("ERROR(UCC): There are too few samples (1) in the partial-AUC interval specified") rvs = aucfct(x=x[from_i:to_i], y=y[from_i:to_i]) rv.append(rvs) if len(rv) < 2: return rv[0] else: return rv @ staticmethod def _find_closest_index(value, array): """ Returns an index of the 'array' element closest in value to 'value' :param value: :param array: :return: """ return np.argmin(np.abs(np.asarray(array)-value)) def _get_single_OP(self, d, lb, ub, scale=1., bias=0.): """ Returns Operating Point for original input data, on coordinates currently set up, given a scale/bias. :param scale: :param bias: :return: single tuple (x point, y point, unit of x, unit of y) """ xnorm = self.std_unit if self.norm_x_axis else 1. ynorm = self.std_unit if self.norm_y_axis else 1. auxop = self._calc_missrate_bandwidth_excess_deficit(d, lb, ub, scale=scale, bias=bias) op = [0.] + [i for i in auxop] # mimic plotdata (first element ignored here) return (op[self.x_axis_idx] / xnorm, op[self.y_axis_idx] / ynorm, xnorm, ynorm) def get_OP(self, scale=1., bias=0.): """ Returns all Operating Points for original input data, on coordinates currently set up, given a scale/bias. :param scale: :param bias: :return: list of tuples (x point, y point, unit of x, unit of y) or a single tuple if there is only 1 component. 
""" if self.d is None: raise ValueError("ERROR(UCC): call fit() prior to using this method.") op = [] for dc in range(len(self.d)): op.append(self._get_single_OP(self.d[dc], self.lb[dc], self.ub[dc], scale=scale, bias=bias)) if len(op) < 2: return op[0] else: return op def plot_UCC(self, titlestr='', syslabel='model', outfn=None, vary_bias=False, markers=None, xlim=None, ylim=None, **kwargs): """ Will plot/display the UCC based on current data and coordinates. Multiple curves will be shown if there are multiple data components (via fit()) :param titlestr: Plot title string :param syslabel: list is label strings to appear in the plot legend. Can be single, if one component. :param outfn: base name of an image file to be created (will append .png before creating) :param vary_bias: True will switch to varying additive bias (default is multiplicative scale) :param markers: None or a list of marker styles to be used for each curve. List must be same or longer than number of components. Markers can be one among these ['o', 's', 'v', 'D', '+']. :param xlim: tuples or lists of specifying the range for the x axis, or None (auto) :param ylim: tuples or lists of specifying the range for the y axis, or None (auto) :param `**kwargs`: Additional arguments passed to the main plot call. :return: list of areas under the curve (or single area, if one data component) list of operating points (or single op): format of an op is tuple (xaxis value, yaxis value, xunit, yunit) """ if self.d is None: raise ValueError("ERROR(UCC): call fit() prior to using this method.") if vary_bias and not self.precompute_bias_data: raise ValueError("ERROR(UCC): Cannot vary bias - instantiated without bias data computation") if not isinstance(syslabel, list): syslabel = [syslabel] assert (len(syslabel) == len(self.d)) assert (markers is None or (isinstance(markers, list) and len(markers) >= len(self.d))) # main plot of (possibly multiple) datasets plt.figure() xnorm = self.std_unit if self.norm_x_axis else 1. ynorm = self.std_unit if self.norm_y_axis else 1. 
op_info = [] auucc = self.get_AUUCC(vary_bias=vary_bias) auucc = [auucc] if not isinstance(auucc, list) else auucc for s in range(len(self.d)): # original operating point x_op, y_op, x_unit, y_unit = self._get_single_OP(self.d[s], self.lb[s], self.ub[s]) op_info.append((x_op, y_op, x_unit, y_unit)) # display chart plotdata = self.plotdata_for_scale[s] if not vary_bias else self.plotdata_for_bias[s] axisX_data = [i[self.x_axis_idx] / xnorm for i in plotdata] axisY_data = [i[self.y_axis_idx] / ynorm for i in plotdata] marker = None if markers is not None: marker = markers[s] p = plt.plot(axisX_data, axisY_data, label=syslabel[s] + (" (AUC=%.3f)" % auucc[s]), marker=marker, **kwargs) if s + 1 == len(self.d): oplab = 'OP' else: oplab = None plt.plot(x_op, y_op, marker='o', color=p[0].get_color(), label=oplab, markerfacecolor='w', markeredgewidth=1.5, markeredgecolor=p[0].get_color()) axisX_label = self.axes_idx2descr[self.x_axis_idx] axisY_label = self.axes_idx2descr[self.y_axis_idx] axisX_units = "(raw)" if np.isclose(xnorm, 1.0) else "[in std deviations]" axisY_units = "(raw)" if np.isclose(ynorm, 1.0) else "[in std deviations]" axisX_label += ' ' + axisX_units axisY_label += ' ' + axisY_units if ylim is not None: plt.ylim(ylim) if xlim is not None: plt.xlim(xlim) plt.xlabel(axisX_label) plt.ylabel(axisY_label) plt.legend() plt.title(titlestr) plt.grid() if outfn is None: plt.show() else: plt.savefig(outfn) if len(auucc) < 2: auucc = auucc[0] op_info = op_info[0] return auucc, op_info
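Illustrative usage of the UCC class above (a sketch on made-up data, not taken from the library): build the [prediction, lower band, upper band] matrix expected by fit(), then query the curve.

import numpy as np

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    gt = rng.randn(500)                                # ground truth
    pred = gt + 0.1 * rng.randn(500)                   # predictions
    band = 0.5 * np.ones(500)                          # symmetric, strictly positive error bars
    X = np.stack([pred, band, band], axis=1)           # columns: prediction, lower band, upper band

    ucc = UncertaintyCharacteristicsCurve()
    ucc.fit(X, gt)
    print('AUUCC (scale-based):', ucc.get_AUUCC())
    print('operating point (x, y, x_unit, y_unit):', ucc.get_OP())
    # recipe (a scale multiplier for the bands) to reach a 5% missrate
    print(ucc.get_specific_operating_point(req_y_axis_value=0.05))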
heteroscedastic_mlp.py
import torch
import torch.nn.functional as F

from uq360.models.noise_models.heteroscedastic_noise_models import GaussianNoise


class GaussianNoiseMLPNet(torch.nn.Module):
    def __init__(self, num_features, num_outputs, num_hidden):
        super(GaussianNoiseMLPNet, self).__init__()
        self.fc = torch.nn.Linear(num_features, num_hidden)
        self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
        self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)
        self.noise_layer = GaussianNoise()

    def forward(self, x):
        x = F.relu(self.fc(x))
        mu = self.fc_mu(x)
        log_var = self.fc_log_var(x)
        return mu, log_var

    def loss(self, y_true=None, mu_pred=None, log_var_pred=None):
        return self.noise_layer.loss(y_true, mu_pred, log_var_pred, reduce_mean=True)
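An assumed smoke test for GaussianNoiseMLPNet (not part of the file): one forward pass, one loss evaluation, and a backward pass on random tensors.

import torch

if __name__ == '__main__':
    torch.manual_seed(0)
    net = GaussianNoiseMLPNet(num_features=4, num_outputs=1, num_hidden=16)
    x = torch.randn(8, 4)
    y = torch.randn(8, 1)
    mu, log_var = net(x)                                           # heteroscedastic mean and log-variance
    nll = net.loss(y_true=y, mu_pred=mu, log_var_pred=log_var).sum()
    nll.backward()
    print('heteroscedastic negative log-likelihood:', nll.item())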
__init__.py
null
layer_utils.py
""" Contains implementations of various utilities used by Horseshoe Bayesian layers """ import numpy as np import torch from torch.nn import Parameter td = torch.distributions gammaln = torch.lgamma def diag_gaussian_entropy(log_std, D): return 0.5 * D * (1.0 + torch.log(2 * np.pi)) + torch.sum(log_std) def inv_gamma_entropy(a, b): return torch.sum(a + torch.log(b) + torch.lgamma(a) - (1 + a) * torch.digamma(a)) def log_normal_entropy(log_std, mu, D): return torch.sum(log_std + mu + 0.5) + (D / 2) * np.log(2 * np.pi) class InvGammaHalfCauchyLayer(torch.nn.Module): """ Uses the inverse Gamma parameterization of the half-Cauchy distribution. a ~ C^+(0, b) <==> a^2 ~ IGamma(0.5, 1/lambda), lambda ~ IGamma(0.5, 1/b^2), where lambda is an auxiliary latent variable. Uses a factorized variational approximation q(ln a^2)q(lambda) = N(mu, sigma^2) IGamma(ahat, bhat). This layer places a half Cauchy prior on the scales of each output node of the layer. """ def __init__(self, out_features, b): """ :param out_fatures: number of output nodes in the layer. :param b: scale of the half Cauchy """ super(InvGammaHalfCauchyLayer, self).__init__() self.b = b self.out_features = out_features # variational parameters for q(ln a^2) self.mu = Parameter(torch.FloatTensor(out_features)) self.log_sigma = Parameter(torch.FloatTensor(out_features)) # self.log_sigma = torch.FloatTensor(out_features) # variational parameters for q(lambda). These will be updated via fixed point updates, hence not parameters. self.ahat = torch.FloatTensor([1.]) # The posterior parameter is always 1. self.bhat = torch.ones(out_features) * (1.0 / self.b ** 2) self.const = torch.FloatTensor([0.5]) self.initialize_from_prior() def initialize_from_prior(self): """ Initializes variational parameters by sampling from the prior. """ # sample from half cauchy and log to initialize the mean of the log normal sample = np.abs(self.b * (np.random.randn(self.out_features) / np.random.randn(self.out_features))) self.mu.data = torch.FloatTensor(np.log(sample)) self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.) def expectation_wrt_prior(self): """ Computes E[ln p(a^2 | lambda)] + E[ln p(lambda)] """ expected_a_given_lambda = -gammaln(self.const) - 0.5 * (torch.log(self.bhat) - torch.digamma(self.ahat)) + ( -0.5 - 1.) * self.mu - torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) * (self.ahat / self.bhat) expected_lambda = -gammaln(self.const) - 2 * 0.5 * np.log(self.b) + (-self.const - 1.) * ( torch.log(self.bhat) - torch.digamma(self.ahat)) - (1. / self.b ** 2) * (self.ahat / self.bhat) return torch.sum(expected_a_given_lambda) + torch.sum(expected_lambda) def entropy(self): """ Computes entropy of q(ln a^2) and q(lambda) """ return self.entropy_lambda() + self.entropy_a2() def entropy_lambda(self): return inv_gamma_entropy(self.ahat, self.bhat) def entropy_a2(self): return log_normal_entropy(self.log_sigma, self.mu, self.out_features) def kl(self): """ Computes KL(q(ln(a^2)q(lambda) || IG(a^2 | 0.5, 1/lambda) IG(lambda | 0.5, 1/b^2)) """ return -self.expectation_wrt_prior() - self.entropy() def fixed_point_updates(self): # update lambda moments self.bhat = torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) + (1. 
/ self.b ** 2) class InvGammaLayer(torch.nn.Module): """ Approximates the posterior of c^2 with prior IGamma(c^2 | a , b) using a log Normal approximation q(ln c^2) = N(mu, sigma^2) """ def __init__(self, a, b, out_features=1): super(InvGammaLayer, self).__init__() self.a = torch.FloatTensor([a]) self.b = torch.FloatTensor([b]) # variational parameters for q(ln c^2) self.mu = Parameter(torch.FloatTensor(out_features)) self.log_sigma = Parameter(torch.FloatTensor(out_features)) self.out_features = out_features self.initialize_from_prior() def initialize_from_prior(self): """ Initializes variational parameters by sampling from the prior. """ self.mu.data = torch.log(self.b / (self.a + 1) * torch.ones(self.out_features)) # initialize at the mode self.log_sigma.data = torch.FloatTensor(np.random.randn(self.out_features) - 10.) def expectation_wrt_prior(self): """ Computes E[ln p(c^2 | a, b)] """ # return self.c_a * np.log(self.c_b) - gammaln(self.c_a) + ( # - self.c_a - 1) * c_mu - self.c_b * Ecinv return self.a * torch.log(self.b) - gammaln(self.a) + (- self.a - 1) \ * self.mu - self.b * torch.exp(-self.mu + 0.5 * self.log_sigma.exp() ** 2) def entropy(self): return log_normal_entropy(self.log_sigma, self.mu, 1) def kl(self): """ Computes KL(q(ln(c^2) || IG(c^2 | a, b)) """ return -self.expectation_wrt_prior().sum() - self.entropy()
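A small sketch (assumed usage, not from the library) showing how the variational scale layers above can be queried for their KL terms and updated:

import torch

if __name__ == '__main__':
    node_scales = InvGammaHalfCauchyLayer(out_features=10, b=1.0)
    c_layer = InvGammaLayer(a=2.0, b=6.0)
    print('half-Cauchy scale layer KL :', node_scales.kl().item())
    print('inverse-Gamma layer KL     :', c_layer.kl().item())
    node_scales.fixed_point_updates()   # refresh q(lambda) given the current q(ln a^2)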
layers.py
""" Contains implementations of various Bayesian layers """ import numpy as np import torch import torch.nn.functional as F from torch.nn import Parameter from uq360.models.bayesian_neural_networks.layer_utils import InvGammaHalfCauchyLayer, InvGammaLayer td = torch.distributions def reparam(mu, logvar, do_sample=True, mc_samples=1): if do_sample: std = torch.exp(0.5 * logvar) eps = torch.FloatTensor(std.size()).normal_() sample = mu + eps * std for _ in np.arange(1, mc_samples): sample += mu + eps * std return sample / mc_samples else: return mu class BayesianLinearLayer(torch.nn.Module): """ Affine layer with N(0, v/H) or N(0, user specified v) priors on weights and fully factorized variational Gaussian approximation """ def __init__(self, in_features, out_features, cuda=False, init_weight=None, init_bias=None, prior_stdv=None): super(BayesianLinearLayer, self).__init__() self.cuda = cuda self.in_features = in_features self.out_features = out_features # weight mean params self.weights = Parameter(torch.Tensor(out_features, in_features)) self.bias = Parameter(torch.Tensor(out_features)) # weight variance params self.weights_logvar = Parameter(torch.Tensor(out_features, in_features)) self.bias_logvar = Parameter(torch.Tensor(out_features)) # numerical stability self.fudge_factor = 1e-8 if not prior_stdv: # We will use a N(0, 1/num_inputs) prior over weights self.prior_stdv = torch.FloatTensor([1. / np.sqrt(self.weights.size(1))]) else: self.prior_stdv = torch.FloatTensor([prior_stdv]) # self.prior_stdv = torch.Tensor([1. / np.sqrt(1e+3)]) self.prior_mean = torch.FloatTensor([0.]) # for Bias use a prior of N(0, 1) self.prior_bias_stdv = torch.FloatTensor([1.]) self.prior_bias_mean = torch.FloatTensor([0.]) # init params either random or with pretrained net self.init_parameters(init_weight, init_bias) def init_parameters(self, init_weight, init_bias): # init means if init_weight is not None: self.weights.data = torch.Tensor(init_weight) else: self.weights.data.normal_(0, np.float(self.prior_stdv.numpy()[0])) if init_bias is not None: self.bias.data = torch.Tensor(init_bias) else: self.bias.data.normal_(0, 1) # init variances self.weights_logvar.data.normal_(-9, 1e-2) self.bias_logvar.data.normal_(-9, 1e-2) def forward(self, x, do_sample=True, scale_variances=False): # local reparameterization trick mu_activations = F.linear(x, self.weights, self.bias) var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) if scale_variances: activ = reparam(mu_activations, var_activations.log() - np.log(self.in_features), do_sample=do_sample) else: activ = reparam(mu_activations, var_activations.log(), do_sample=do_sample) return activ def kl(self): """ KL divergence (q(W) || p(W)) :return: """ weights_logvar = self.weights_logvar kld_weights = self.prior_stdv.log() - weights_logvar.mul(0.5) + \ (weights_logvar.exp() + (self.weights.pow(2) - self.prior_mean)) / ( 2 * self.prior_stdv.pow(2)) - 0.5 kld_bias = self.prior_bias_stdv.log() - self.bias_logvar.mul(0.5) + \ (self.bias_logvar.exp() + (self.bias.pow(2) - self.prior_bias_mean)) / ( 2 * self.prior_bias_stdv.pow(2)) \ - 0.5 return kld_weights.sum() + kld_bias.sum() class HorseshoeLayer(BayesianLinearLayer): """ Uses non-centered parametrization. 
w_k = v*tau_k*beta_k where k indexes an output unit and w_k and beta_k are vectors of all weights incident into the unit """ def __init__(self, in_features, out_features, cuda=False, scale=1.): super(HorseshoeLayer, self).__init__(in_features, out_features) self.cuda = cuda self.in_features = in_features self.out_features = out_features self.nodescales = InvGammaHalfCauchyLayer(out_features=out_features, b=1.) self.layerscale = InvGammaHalfCauchyLayer(out_features=1, b=scale) # prior on beta is N(0, I) when employing non centered parameterization self.prior_stdv = torch.Tensor([1]) self.prior_mean = torch.Tensor([0.]) def forward(self, x, do_sample=True, debug=False, eps_scale=None, eps_w=None): # At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample # sample scales scale_mean = 0.5 * (self.nodescales.mu + self.layerscale.mu) scale_var = 0.25 * (self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2) scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp() # sample preactivations mu_activations = F.linear(x, self.weights, self.bias) var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample) return scale_sample * activ_sample def kl(self): return super(HorseshoeLayer, self).kl() + self.nodescales.kl() + self.layerscale.kl() def fixed_point_updates(self): self.nodescales.fixed_point_updates() self.layerscale.fixed_point_updates() class RegularizedHorseshoeLayer(HorseshoeLayer): """ Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe. For all weights w_k incident upon node k in the layer we have: w_k ~ N(0, (tau_k * v)^2 I) N(0, c^2 I), c^2 ~ InverseGamma(c_a, b). c^2 controls the scale of the thresholding. As c^2 -> infinity, the regularized Horseshoe -> Horseshoe. """ def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.): super(RegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale) self.c = InvGammaLayer(a=c_a, b=c_b) def forward(self, x, do_sample=True, **kwargs): # At a particular unit k, preactivation_sample = scale_sample * pre_activation_sample # sample regularized scales scale_mean = self.nodescales.mu + self.layerscale.mu scale_var = self.nodescales.log_sigma.exp() ** 2 + self.layerscale.log_sigma.exp() ** 2 scale_sample = reparam(scale_mean, scale_var.log(), do_sample=do_sample).exp() c_sample = reparam(self.c.mu, 2 * self.c.log_sigma, do_sample=do_sample).exp() regularized_scale_sample = (c_sample * scale_sample) / (c_sample + scale_sample) # sample preactivations mu_activations = F.linear(x, self.weights, self.bias) var_activations = F.linear(x.pow(2), self.weights_logvar.exp(), self.bias_logvar.exp()) activ_sample = reparam(mu_activations, var_activations.log(), do_sample=do_sample) return torch.sqrt(regularized_scale_sample) * activ_sample def kl(self): return super(RegularizedHorseshoeLayer, self).kl() + self.c.kl() class NodeSpecificRegularizedHorseshoeLayer(RegularizedHorseshoeLayer): """ Uses the regularized Horseshoe distribution. The regularized Horseshoe soft thresholds the tails of the Horseshoe. For all weights w_k incident upon node k in the layer we have: w_k ~ N(0, (tau_k * v)^2 I) N(0, c_k^2 I), c_k^2 ~ InverseGamma(a, b). c_k^2 controls the scale of the thresholding. 
As c_k^2 -> infinity, the regularized Horseshoe -> Horseshoe Note that we now have a per-node c_k. """ def __init__(self, in_features, out_features, cuda=False, scale=1., c_a=2., c_b=6.): super(NodeSpecificRegularizedHorseshoeLayer, self).__init__(in_features, out_features, cuda=cuda, scale=scale) self.c = InvGammaLayer(a=c_a, b=c_b, out_features=out_features)
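A minimal sketch (assumed usage) of the layers above: one stochastic forward pass and the KL penalty for a plain Bayesian layer and a Horseshoe layer.

import torch

if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.randn(16, 5)
    blinear = BayesianLinearLayer(in_features=5, out_features=3)
    hshoe = HorseshoeLayer(in_features=5, out_features=3)
    out_b = blinear(x, do_sample=True)
    out_h = hshoe(x, do_sample=True)
    print(out_b.shape, out_h.shape)                      # torch.Size([16, 3]) for both
    print('KL terms:', blinear.kl().item(), hshoe.kl().item())
    hshoe.fixed_point_updates()                          # update q(lambda) for the node and layer scales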
__init__.py
null
misc.py
import numpy as np
import torch

from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseFixedPrecision


def compute_test_ll(y_test, y_pred_samples, std_y=1.):
    """
    Computes test log likelihoods = (1 / Ntest) * \sum_n p(y_n | x_n, D_train)
    :param y_test: True y
    :param y_pred_samples: y^s = f(x_test, w^s); w^s ~ q(w). S x Ntest, where S is the number of samples
        q(w) is either a trained variational posterior or an MCMC approximation to p(w | D_train)
    :param std_y: True std of y (assumed known)
    """
    S, _ = y_pred_samples.shape
    noise = GaussianNoiseFixedPrecision(std_y=std_y)
    ll = noise.loss(y_pred=y_pred_samples, y_true=y_test.unsqueeze(dim=0), reduce_sum=False)
    ll = torch.logsumexp(ll, dim=0) - np.log(S)  # mean over num samples
    return torch.mean(ll)  # mean over test points
horseshoe_mlp.py
from abc import ABC import numpy as np import torch from torch import nn from uq360.models.bayesian_neural_networks.layers import HorseshoeLayer, BayesianLinearLayer, RegularizedHorseshoeLayer from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision import numpy as np td = torch.distributions class HshoeBNN(nn.Module, ABC): """ Bayesian neural network with Horseshoe layers. """ def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1, hshoe_scale=1e-1, use_reg_hshoe=False): if use_reg_hshoe: layer = RegularizedHorseshoeLayer else: layer = HorseshoeLayer super(HshoeBNN, self).__init__() self.num_layers = num_layers if activation_type == 'relu': # activation self.activation = nn.ReLU() elif activation_type == 'tanh': self.activation = nn.Tanh() else: print("Activation Type not supported") self.fc_hidden = [] self.fc1 = layer(ip_dim, num_nodes, scale=hshoe_scale) for _ in np.arange(self.num_layers - 1): self.fc_hidden.append(layer(num_nodes, num_nodes)) self.fc_out = BayesianLinearLayer(num_nodes, op_dim) self.noise_layer = None def forward(self, x, do_sample=True): x = self.fc1(x, do_sample=do_sample) x = self.activation(x) for layer in self.fc_hidden: x = layer(x, do_sample=do_sample) x = self.activation(x) return self.fc_out(x, do_sample=do_sample, scale_variances=True) def kl_divergence_w(self): kld = self.fc1.kl() + self.fc_out.kl() for layer in self.fc_hidden: kld += layer.kl() return kld def fixed_point_updates(self): if hasattr(self.fc1, 'fixed_point_updates'): self.fc1.fixed_point_updates() if hasattr(self.fc_out, 'fixed_point_updates'): self.fc_out.fixed_point_updates() for layer in self.fc_hidden: if hasattr(layer, 'fixed_point_updates'): layer.fixed_point_updates() def prior_predictive_samples(self, n_sample=100): n_eval = 1000 x = torch.linspace(-2, 2, n_eval)[:, np.newaxis] y = np.zeros([n_sample, n_eval]) for i in np.arange(n_sample): y[i] = self.forward(x).data.numpy().ravel() return x.data.numpy(), y ### get and set weights ### def get_weights(self): assert len(self.fc_hidden) == 0 # only works for one layer networks. weight_dict = {} weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy() weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy() weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy() weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy() return weight_dict def set_weights(self, weight_dict): assert len(self.fc_hidden) == 0 # only works for one layer networks. to_param = lambda x: nn.Parameter(torch.Tensor(x)) self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1]) self.fc1.weights = to_param(weight_dict['layerip_logvar'][:, :-1]) self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1]) self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1]) self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1]) self.fc_out.weights = to_param(weight_dict['layerop_logvar'][:, :-1]) self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1]) self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1]) class HshoeRegressionNet(HshoeBNN, ABC): """ Horseshoe net with N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b) likelihoods. 
""" def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False): super(HshoeRegressionNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim, num_nodes=num_nodes, activation_type=activation_type, num_layers=num_layers, hshoe_scale=hshoe_scale, use_reg_hshoe=use_reg_hshoe) self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.) def likelihood(self, x=None, y=None): out = self.forward(x) return -self.noise_layer.loss(y_pred=out, y_true=y) def neg_elbo(self, num_batches, x=None, y=None): # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. Elik = self.likelihood(x, y) neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik return neg_elbo def mse(self, x, y): """ scaled rmse (scaled by 1 / std_y**2) """ E_noise_precision = 1. / self.noise_layer.get_noise_var() return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum() def get_noise_var(self): return self.noise_layer.get_noise_var() class HshoeClassificationNet(HshoeBNN, ABC): """ Horseshoe net with Categorical(y_true | f(x, w)) likelihoods. Use for classification. """ def __init__(self, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1, hshoe_scale=1e-5, use_reg_hshoe=False): super(HshoeClassificationNet, self).__init__(ip_dim=ip_dim, op_dim=op_dim, num_nodes=num_nodes, activation_type=activation_type, num_layers=num_layers, hshoe_scale=hshoe_scale, use_reg_hshoe=use_reg_hshoe) self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum') def likelihood(self, x=None, y=None): out = self.forward(x) return -self.noise_layer(out, y) def neg_elbo(self, num_batches, x=None, y=None): # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. Elik = self.likelihood(x, y) neg_elbo = (self.kl_divergence_w()) / num_batches - Elik return neg_elbo
bayesian_mlp.py
from abc import ABC import torch from torch import nn from uq360.models.bayesian_neural_networks.layers import BayesianLinearLayer from uq360.models.noise_models.homoscedastic_noise_models import GaussianNoiseGammaPrecision import numpy as np td = torch.distributions class BayesianNN(nn.Module, ABC): """ Bayesian neural network with zero mean Gaussian priors over weights. """ def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1): super(BayesianNN, self).__init__() self.num_layers = num_layers if activation_type == 'relu': # activation self.activation = nn.ReLU() elif activation_type == 'tanh': self.activation = nn.Tanh() else: print("Activation Type not supported") self.fc_hidden = [] self.fc1 = layer(ip_dim, num_nodes,) for _ in np.arange(self.num_layers - 1): self.fc_hidden.append(layer(num_nodes, num_nodes, )) self.fc_out = layer(num_nodes, op_dim, ) self.noise_layer = None def forward(self, x, do_sample=True): x = self.fc1(x, do_sample=do_sample) x = self.activation(x) for layer in self.fc_hidden: x = layer(x, do_sample=do_sample) x = self.activation(x) return self.fc_out(x, do_sample=do_sample, scale_variances=True) def kl_divergence_w(self): kld = self.fc1.kl() + self.fc_out.kl() for layer in self.fc_hidden: kld += layer.kl() return kld def prior_predictive_samples(self, n_sample=100): n_eval = 1000 x = torch.linspace(-2, 2, n_eval)[:, np.newaxis] y = np.zeros([n_sample, n_eval]) for i in np.arange(n_sample): y[i] = self.forward(x).data.numpy().ravel() return x.data.numpy(), y ### get and set weights ### def get_weights(self): assert len(self.fc_hidden) == 0 # only works for one layer networks. weight_dict = {} weight_dict['layerip_means'] = torch.cat([self.fc1.weights, self.fc1.bias.unsqueeze(1)], dim=1).data.numpy() weight_dict['layerip_logvar'] = torch.cat([self.fc1.weights_logvar, self.fc1.bias_logvar.unsqueeze(1)], dim=1).data.numpy() weight_dict['layerop_means'] = torch.cat([self.fc_out.weights, self.fc_out.bias.unsqueeze(1)], dim=1).data.numpy() weight_dict['layerop_logvar'] = torch.cat([self.fc_out.weights_logvar, self.fc_out.bias_logvar.unsqueeze(1)], dim=1).data.numpy() return weight_dict def set_weights(self, weight_dict): assert len(self.fc_hidden) == 0 # only works for one layer networks. to_param = lambda x: nn.Parameter(torch.Tensor(x)) self.fc1.weights = to_param(weight_dict['layerip_means'][:, :-1]) self.fc1.weights = to_param(weight_dict['layerip_logvar'][:, :-1]) self.fc1.bias = to_param(weight_dict['layerip_means'][:, -1]) self.fc1.bias_logvar = to_param(weight_dict['layerip_logvar'][:, -1]) self.fc_out.weights = to_param(weight_dict['layerop_means'][:, :-1]) self.fc_out.weights = to_param(weight_dict['layerop_logvar'][:, :-1]) self.fc_out.bias = to_param(weight_dict['layerop_means'][:, -1]) self.fc_out.bias_logvar = to_param(weight_dict['layerop_logvar'][:, -1]) class BayesianRegressionNet(BayesianNN, ABC): """ Bayesian neural net with N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b) likelihoods. """ def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1): super(BayesianRegressionNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim, num_nodes=num_nodes, activation_type=activation_type, num_layers=num_layers, ) self.noise_layer = GaussianNoiseGammaPrecision(a0=6., b0=6.) 
def likelihood(self, x=None, y=None): out = self.forward(x) return -self.noise_layer.loss(y_pred=out, y_true=y) def neg_elbo(self, num_batches, x=None, y=None): # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. Elik = self.likelihood(x, y) neg_elbo = (self.kl_divergence_w() + self.noise_layer.kl()) / num_batches - Elik return neg_elbo def mse(self, x, y): """ scaled rmse (scaled by 1 / std_y**2) """ E_noise_precision = 1. / self.noise_layer.get_noise_var() return (0.5 * E_noise_precision * (self.forward(x, do_sample=False) - y)**2).sum() def get_noise_var(self): return self.noise_layer.get_noise_var() class BayesianClassificationNet(BayesianNN, ABC): """ Bayesian neural net with Categorical(y_true | f(x, w)) likelihoods. Use for classification. """ def __init__(self, layer=BayesianLinearLayer, ip_dim=1, op_dim=1, num_nodes=50, activation_type='relu', num_layers=1): super(BayesianClassificationNet, self).__init__(layer=layer, ip_dim=ip_dim, op_dim=op_dim, num_nodes=num_nodes, activation_type=activation_type, num_layers=num_layers) self.noise_layer = torch.nn.CrossEntropyLoss(reduction='sum') def likelihood(self, x=None, y=None): out = self.forward(x) return -self.noise_layer(out, y) def neg_elbo(self, num_batches, x=None, y=None): # scale the KL terms by number of batches so that the minibatch elbo is an unbiased estiamte of the true elbo. Elik = self.likelihood(x, y) neg_elbo = self.kl_divergence_w() / num_batches - Elik return neg_elbo
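A comparable assumed sketch for the classification net above, trained with a few ELBO steps on a toy two-class problem:

import torch

if __name__ == '__main__':
    torch.manual_seed(0)
    X = torch.randn(200, 2)
    y = (X[:, 0] + X[:, 1] > 0).long()                   # two linearly separable classes

    net = BayesianClassificationNet(ip_dim=2, op_dim=2, num_nodes=32, num_layers=1)
    opt = torch.optim.Adam(net.parameters(), lr=1e-2)
    for step in range(200):
        opt.zero_grad()
        loss = net.neg_elbo(num_batches=1, x=X, y=y)
        loss.backward()
        opt.step()
    probs = torch.softmax(net(X, do_sample=False), dim=1)
    print('training accuracy:', (probs.argmax(dim=1) == y).float().mean().item())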
__init__.py
null
homoscedastic_noise_models.py
import math import numpy as np import torch from scipy.special import gammaln from uq360.models.noise_models.noisemodel import AbstractNoiseModel from torch.nn import Parameter td = torch.distributions def transform(a): return torch.log(1 + torch.exp(a)) class GaussianNoiseGammaPrecision(torch.nn.Module, AbstractNoiseModel): """ N(y_true | f(x, w), \lambda^-1); \lambda ~ Gamma(a, b). Uses a variational approximation; q(lambda) = Gamma(ahat, bhat) """ def __init__(self, a0=6, b0=6, cuda=False): super(GaussianNoiseGammaPrecision, self).__init__() self.cuda = cuda self.a0 = a0 self.b0 = b0 self.const = torch.log(torch.FloatTensor([2 * math.pi])) # variational parameters self.ahat = Parameter(torch.FloatTensor([10.])) self.bhat = Parameter(torch.FloatTensor([3.])) def loss(self, y_pred=None, y_true=None): """ computes -1 * E_q(\lambda)[ln N (y_pred | y_true, \lambda^-1)], where q(lambda) = Gamma(ahat, bhat) :param y_pred: :param y_true: :return: """ n = y_pred.shape[0] ahat = transform(self.ahat) bhat = transform(self.bhat) return -1 * (-0.5 * n * self.const + 0.5 * n * (torch.digamma(ahat) - torch.log(bhat)) \ - 0.5 * (ahat/bhat) * ((y_pred - y_true) ** 2).sum()) def kl(self): ahat = transform(self.ahat) bhat = transform(self.bhat) return (ahat - self.a0) * torch.digamma(ahat) - torch.lgamma(ahat) + gammaln(self.a0) + \ self.a0 * (torch.log(bhat) - np.log(self.b0)) + ahat * (self.b0 - bhat) / bhat def get_noise_var(self): ahat = transform(self.ahat) bhat = transform(self.bhat) return (bhat / ahat).data.numpy()[0] class GaussianNoiseFixedPrecision(torch.nn.Module, AbstractNoiseModel): """ N(y_true | f(x, w), sigma_y**2); known sigma_y """ def __init__(self, std_y=1., cuda=False): super(GaussianNoiseFixedPrecision, self).__init__() self.cuda = cuda self.const = torch.log(torch.FloatTensor([2 * math.pi])) self.sigma_y = std_y def loss(self, y_pred=None, y_true=None): """ computes -1 * ln N (y_pred | y_true, sigma_y**2) :param y_pred: :param y_true: :return: """ ll = -0.5 * self.const - np.log(self.sigma_y) - 0.5 * (1. / self.sigma_y ** 2) * ((y_pred - y_true) ** 2) return -ll.sum(dim=0) def get_noise_var(self): return self.sigma_y ** 2
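A quick assumed check of the Gaussian-Gamma noise model above: evaluate the expected negative log-likelihood, the KL term, and the current posterior estimate of the noise variance.

import torch

if __name__ == '__main__':
    torch.manual_seed(0)
    noise = GaussianNoiseGammaPrecision(a0=6., b0=6.)
    y_pred = torch.randn(32, 1)
    y_true = y_pred + 0.1 * torch.randn(32, 1)
    print('expected NLL   :', noise.loss(y_pred=y_pred, y_true=y_true).item())
    print('KL(q || prior) :', noise.kl().item())
    print('noise variance :', noise.get_noise_var())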
heteroscedastic_noise_models.py
import math import numpy as np import torch from scipy.special import gammaln from uq360.models.noise_models.noisemodel import AbstractNoiseModel from torch.nn import Parameter td = torch.distributions def transform(a): return torch.log(1 + torch.exp(a)) class GaussianNoise(torch.nn.Module, AbstractNoiseModel): """ N(y_true | f_\mu(x, w), f_\sigma^2(x, w)) """ def __init__(self, cuda=False): super(GaussianNoise, self).__init__() self.cuda = cuda self.const = torch.log(torch.FloatTensor([2 * math.pi])) def loss(self, y_true=None, mu_pred=None, log_var_pred=None, reduce_mean=True): """ computes -1 * ln N (y_true | mu_pred, softplus(log_var_pred)) :param y_true: :param mu_pred: :param log_var_pred: :return: """ var_pred = transform(log_var_pred) ll = -0.5 * self.const - 0.5 * torch.log(var_pred) - 0.5 * (1. / var_pred) * ((mu_pred - y_true) ** 2) if reduce_mean: return -ll.mean(dim=0) else: return -ll.sum(dim=0) def get_noise_var(self, log_var_pred): return transform(log_var_pred)
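An assumed sketch illustrating that the predicted log-variance is passed through the softplus transform above before being used as the variance, so raw network outputs may be any real number.

import torch

if __name__ == '__main__':
    noise = GaussianNoise()
    y_true = torch.zeros(5, 1)
    mu_pred = torch.zeros(5, 1)
    log_var_pred = torch.full((5, 1), -3.0)               # raw output; variance = softplus(-3) ~ 0.049
    print('mean NLL over the batch:', noise.loss(y_true=y_true, mu_pred=mu_pred, log_var_pred=log_var_pred).item())
    print('implied variance       :', noise.get_noise_var(log_var_pred)[0, 0].item())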
noisemodel.py
import abc
import sys

# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta(str('ABC'), (), {})


class AbstractNoiseModel(ABC):
    """ Abstract class. All noise models inherit from here. """

    def __init__(self, *argv, **kwargs):
        """ Initialize an AbstractNoiseModel object. """

    @abc.abstractmethod
    def loss(self, *argv, **kwargs):
        """ Compute loss given predictions and groundtruth labels """
        raise NotImplementedError

    @abc.abstractmethod
    def get_noise_var(self, *argv, **kwargs):
        """ Return the current estimate of noise variance """
        raise NotImplementedError
__init__.py
null
__init__.py
null
builtinuq.py
import abc
import sys

# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta(str('ABC'), (), {})


class BuiltinUQ(ABC):
    """ BuiltinUQ is the base class for any algorithm that has UQ built into it. """

    def __init__(self, *argv, **kwargs):
        """ Initialize a BuiltinUQ object. """

    @abc.abstractmethod
    def fit(self, *argv, **kwargs):
        """ Learn the UQ related parameters. """
        raise NotImplementedError

    @abc.abstractmethod
    def predict(self, *argv, **kwargs):
        """ Method to obtain the predictive uncertainty; this can return the total, epistemic and/or
        aleatoric uncertainty in the predictions.
        """
        raise NotImplementedError

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self
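An assumed illustration (not part of the module): the smallest useful BuiltinUQ subclass only needs fit() and predict(); this toy version predicts the training mean with a fixed uncertainty band.

import numpy as np

class _ConstantUncertainty(BuiltinUQ):
    def __init__(self, band=1.0):
        super(_ConstantUncertainty, self).__init__()
        self.band = band          # half-width of the returned interval
        self.mean_ = None

    def fit(self, X, y):
        self.mean_ = float(np.mean(y))
        return self

    def predict(self, X):
        n = len(X)
        y_mean = np.full(n, self.mean_)
        return y_mean, y_mean - self.band, y_mean + self.band

# est = _ConstantUncertainty(band=2.0).fit(X_train, y_train)   # X_train / y_train are hypothetical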
__init__.py
null
posthocuq.py
import abc
import sys

# Ensure compatibility with Python 2/3
if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta(str('ABC'), (), {})


class PostHocUQ(ABC):
    """ PostHocUQ is the base class for any algorithm that quantifies uncertainty of a pre-trained model. """

    def __init__(self, *argv, **kwargs):
        """ Initialize a PostHocUQ object. """

    @abc.abstractmethod
    def _process_pretrained_model(self, *argv, **kwargs):
        """ Method to process the pretrained model that requires UQ. """
        raise NotImplementedError

    @abc.abstractmethod
    def predict(self, *argv, **kwargs):
        """ Method to obtain the predictive uncertainty; this can return the total, epistemic and/or
        aleatoric uncertainty in the predictions.
        """
        raise NotImplementedError

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self

    def get_params(self):
        """ This method should not take any arguments and returns a dict of the __init__ parameters. """
        raise NotImplementedError
__init__.py
from .ucc_recalibration import UCCRecalibration
ucc_recalibration.py
from collections import namedtuple from uq360.algorithms.posthocuq import PostHocUQ from uq360.utils.misc import form_D_for_auucc from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve class UCCRecalibration(PostHocUQ): """ Recalibration a regression model to specified operating point using Uncertainty Characteristics Curve. """ def __init__(self, base_model): """ Args: base_model: pretrained model to be recalibrated. """ super(UCCRecalibration).__init__() self.base_model = self._process_pretrained_model(base_model) self.ucc = None def get_params(self, deep=True): return {"base_model": self.base_model} def _process_pretrained_model(self, base_model): return base_model def fit(self, X, y): """ Fit the Uncertainty Characteristics Curve. Args: X: array-like of shape (n_samples, n_features). Features vectors of the test points. y: array-like of shape (n_samples,) or (n_samples, n_targets) Target values Returns: self """ y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3] bwu = y_pred_upper - y_pred_mean bwl = y_pred_mean - y_pred_lower self.ucc = UncertaintyCharacteristicsCurve() self.ucc.fit(form_D_for_auucc(y_pred_mean, bwl, bwu), y.squeeze()) return self def predict(self, X, missrate=0.05): """ Generate prediction and uncertainty bounds for data X. Args: X: array-like of shape (n_samples, n_features). Features vectors of the test points. missrate: desired missrate of the new operating point, set to 0.05 by default. Returns: namedtuple: A namedtupe that holds y_mean: ndarray of shape (n_samples, [n_output_dims]) Mean of predictive distribution of the test points. y_lower: ndarray of shape (n_samples, [n_output_dims]) Lower quantile of predictive distribution of the test points. y_upper: ndarray of shape (n_samples, [n_output_dims]) Upper quantile of predictive distribution of the test points. """ C = self.ucc.get_specific_operating_point(req_y_axis_value=missrate, vary_bias=False) new_scale = C['modvalue'] y_pred_mean, y_pred_lower, y_pred_upper = self.base_model.predict(X)[:3] bwu = y_pred_upper - y_pred_mean bwl = y_pred_mean - y_pred_lower if C['operation'] == 'bias': calib_y_pred_upper = y_pred_mean + (new_scale + bwu) # lower bound width calib_y_pred_lower = y_pred_mean - (new_scale + bwl) # Upper bound width else: calib_y_pred_upper = y_pred_mean + (new_scale * bwu) # lower bound width calib_y_pred_lower = y_pred_mean - (new_scale * bwl) # Upper bound width Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper']) res = Result(y_pred_mean, calib_y_pred_lower, calib_y_pred_upper) return res
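An assumed end-to-end sketch for UCCRecalibration: a hypothetical base model whose predict() returns (mean, lower, upper) is recalibrated so that the new bands target a 5% missrate on the calibration set.

import numpy as np

class _ToyIntervalModel:
    def predict(self, X):
        y_mean = X[:, 0]
        return y_mean, y_mean - 0.2, y_mean + 0.2          # deliberately over-confident bands

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_calib = rng.randn(300, 1)
    y_calib = X_calib[:, 0] + 0.5 * rng.randn(300)

    recal = UCCRecalibration(base_model=_ToyIntervalModel()).fit(X_calib, y_calib)
    res = recal.predict(X_calib, missrate=0.05)
    coverage = np.mean((y_calib >= res.y_lower) & (y_calib <= res.y_upper))
    print('empirical coverage after recalibration: %.3f' % coverage)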
__init__.py
from .classification_calibration import ClassificationCalibration
classification_calibration.py
from collections import namedtuple

import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.preprocessing import LabelEncoder

from uq360.utils.misc import DummySklearnEstimator
from uq360.algorithms.posthocuq import PostHocUQ


class ClassificationCalibration(PostHocUQ):
    """Post hoc calibration of classification models. Currently wraps `CalibratedClassifierCV` from sklearn and
    allows non-sklearn models to be calibrated.
    """

    def __init__(self, num_classes, fit_mode="features", method='isotonic',
                 base_model_prediction_func=None):
        """
        Args:
            num_classes: number of classes.
            fit_mode: features or probs. If probs, `fit` and `predict` operate on the base model's probability
                scores, useful when these are precomputed.
            method: isotonic or sigmoid.
            base_model_prediction_func: the function that takes in the input features and produces the base model's
                probability scores. This is ignored when operating in `probs` mode.
        """
        super(ClassificationCalibration, self).__init__()
        # store constructor arguments so get_params() can report them
        self.num_classes = num_classes
        self.fit_mode = fit_mode
        self.base_model_prediction_func = base_model_prediction_func
        if fit_mode == "probs":
            # In this case, the fit assumes that it receives the probability scores of the base model.
            # create a dummy estimator
            self.base_model = DummySklearnEstimator(num_classes, lambda x: x)
        else:
            self.base_model = DummySklearnEstimator(num_classes, base_model_prediction_func)
        self.method = method

    def get_params(self, deep=True):
        return {"num_classes": self.num_classes, "fit_mode": self.fit_mode, "method": self.method,
                "base_model_prediction_func": self.base_model_prediction_func}

    def _process_pretrained_model(self, base_model):
        return base_model

    def fit(self, X, y):
        """ Fits calibration model using the provided calibration set.

        Args:
            X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
                Features vectors of the training data or the probability scores from the base model.
            y: array-like of shape (n_samples,) or (n_samples, n_targets)
                Target values

        Returns:
            self
        """
        self.base_model.label_encoder_ = LabelEncoder().fit(y)
        self.calib_model = CalibratedClassifierCV(base_estimator=self.base_model,
                                                  cv="prefit",
                                                  method=self.method)
        self.calib_model.fit(X, y)

        return self

    def predict(self, X):
        """ Obtain calibrated predictions for the test points.

        Args:
            X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
                Features vectors of the test points or the probability scores from the base model.

        Returns:
            namedtuple: A namedtuple that holds

            y_pred: ndarray of shape (n_samples,)
                Predicted labels of the test points.
            y_prob: ndarray of shape (n_samples, n_classes)
                Predicted probability scores of the classes.
        """
        y_prob = self.calib_model.predict_proba(X)
        if len(np.shape(y_prob)) == 1:
            y_pred_labels = y_prob > 0.5
        else:
            y_pred_labels = np.argmax(y_prob, axis=1)

        Result = namedtuple('res', ['y_pred', 'y_prob'])
        res = Result(y_pred_labels, y_prob)

        return res
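A sketch of the `probs` mode follows. It is hypothetical: the import path is assumed, the probability scores and labels are simulated, and it presumes `DummySklearnEstimator` supports the pre-fit `CalibratedClassifierCV` workflow used above.

import numpy as np
from uq360.algorithms.classification_calibration import ClassificationCalibration  # assumed import path

rng = np.random.RandomState(0)
probs_calib = rng.dirichlet(alpha=[1.0, 1.0, 1.0], size=500)  # simulated base-model scores, 3 classes
y_calib = probs_calib.argmax(axis=1)                          # placeholder labels

calibrator = ClassificationCalibration(num_classes=3, fit_mode="probs", method="isotonic")
calibrator.fit(probs_calib, y_calib)   # calibrates the precomputed probability scores
res = calibrator.predict(probs_calib[:5])
print(res.y_pred, res.y_prob.round(3))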
auxiliary_interval_predictor.py
from collections import namedtuple

import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import norm
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset

from uq360.algorithms.builtinuq import BuiltinUQ

np.random.seed(42)
torch.manual_seed(42)


class _MLPNet_Main(torch.nn.Module):
    def __init__(self, num_features, num_outputs, num_hidden):
        super(_MLPNet_Main, self).__init__()
        self.fc = torch.nn.Linear(num_features, num_hidden)
        self.fc_mu = torch.nn.Linear(num_hidden, num_outputs)
        self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)

    def forward(self, x):
        x = F.relu(self.fc(x))
        mu = self.fc_mu(x)
        log_var = self.fc_log_var(x)
        return mu, log_var


class _MLPNet_Aux(torch.nn.Module):
    def __init__(self, num_features, num_outputs, num_hidden):
        super(_MLPNet_Aux, self).__init__()
        self.fc = torch.nn.Linear(num_features, num_hidden)
        self.fc_log_var = torch.nn.Linear(num_hidden, num_outputs)

    def forward(self, x):
        x = F.relu(self.fc(x))
        log_var = self.fc_log_var(x)
        return log_var


class AuxiliaryIntervalPredictor(BuiltinUQ):
    """ Auxiliary Interval Predictor [1]_ uses an auxiliary model to encourage calibration of the main model.

    References:
        .. [1] Thiagarajan, J. J., Venkatesh, B., Sattigeri, P., & Bremer, P. T. (2020, April). Building calibrated
            deep models via uncertainty matching with auxiliary interval predictors. In Proceedings of the AAAI
            Conference on Artificial Intelligence (Vol. 34, No. 04, pp. 6005-6012). https://arxiv.org/abs/1909.04079
    """

    def __init__(self, model_type=None, main_model=None, aux_model=None, config=None, device=None, verbose=True):
        """
        Args:
            model_type: The model type used to build the main model and the auxiliary model. Currently supported
                values are [mlp, custom]. `mlp` learns an MLP neural network using the pytorch framework. For
                `custom` the user provides `main_model` and `aux_model`.
            main_model: (optional) The main prediction model. Currently supports pytorch models that return
                mean and log variance.
            aux_model: (optional) The auxiliary prediction model. Currently supports pytorch models that return
                calibrated log variance.
            config: dictionary containing the config parameters for the model.
            device: device used for pytorch models, ignored otherwise.
            verbose: if True, print statements with the progress are enabled.
        """
        super(AuxiliaryIntervalPredictor, self).__init__()
        self.config = config
        self.device = device
        self.verbose = verbose
        if model_type == "mlp":
            self.model_type = model_type
            self.main_model = _MLPNet_Main(
                num_features=self.config["num_features"],
                num_outputs=self.config["num_outputs"],
                num_hidden=self.config["num_hidden"],
            )
            self.aux_model = _MLPNet_Aux(
                num_features=self.config["num_features"],
                num_outputs=self.config["num_outputs"],
                num_hidden=self.config["num_hidden"],
            )
        elif model_type == "custom":
            self.model_type = model_type
            self.main_model = main_model
            self.aux_model = aux_model
        else:
            raise NotImplementedError

    def get_params(self, deep=True):
        return {"model_type": self.model_type, "config": self.config, "main_model": self.main_model,
                "aux_model": self.aux_model, "device": self.device, "verbose": self.verbose}

    def _main_model_loss(self, y_true, y_pred_mu, y_pred_log_var, y_pred_log_var_aux):
        r = torch.abs(y_true - y_pred_mu)
        # + 0.5 * y_pred_log_var +
        loss = torch.mean(0.5 * torch.exp(-y_pred_log_var) * r ** 2) + \
               self.config["lambda_match"] * torch.mean(torch.abs(torch.exp(0.5 * y_pred_log_var) -
                                                                  torch.exp(0.5 * y_pred_log_var_aux)))
        return loss

    def _aux_model_loss(self, y_true, y_pred_mu, y_pred_log_var_aux):
        deltal = deltau = 2.0 * torch.exp(0.5 * y_pred_log_var_aux)
        upper = y_pred_mu + deltau
        lower = y_pred_mu - deltal
        width = upper - lower
        r = torch.abs(y_true - y_pred_mu)

        emce = torch.mean(torch.sigmoid((y_true - lower) * (upper - y_true) * 100000))

        loss_emce = torch.abs(self.config["calibration_alpha"] - emce)
        loss_noise = torch.mean(torch.abs(0.5 * width - r))
        loss_sharpness = torch.mean(torch.abs(upper - y_true)) + torch.mean(torch.abs(lower - y_true))
        # print(emce)
        return loss_emce + self.config["lambda_noise"] * loss_noise + self.config["lambda_sharpness"] * loss_sharpness

    def fit(self, X, y):
        """ Fit the Auxiliary Interval Predictor model.

        Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the training data.
            y: array-like of shape (n_samples,) or (n_samples, n_targets)
                Target values

        Returns:
            self
        """
        X = torch.from_numpy(X).float().to(self.device)
        y = torch.from_numpy(y).float().to(self.device)
        dataset_loader = DataLoader(
            TensorDataset(X, y),
            batch_size=self.config["batch_size"]
        )
        optimizer_main_model = torch.optim.Adam(self.main_model.parameters(), lr=self.config["lr"])
        optimizer_aux_model = torch.optim.Adam(self.aux_model.parameters(), lr=self.config["lr"])

        for it in range(self.config["num_outer_iters"]):

            # Train the main model
            for epoch in range(self.config["num_main_iters"]):
                avg_mean_model_loss = 0.0
                for batch_x, batch_y in dataset_loader:
                    self.main_model.train()
                    self.aux_model.eval()

                    batch_y_pred_log_var_aux = self.aux_model(batch_x)
                    batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
                    main_loss = self._main_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var,
                                                      batch_y_pred_log_var_aux)

                    optimizer_main_model.zero_grad()
                    main_loss.backward()
                    optimizer_main_model.step()

                    avg_mean_model_loss += main_loss.item() / len(dataset_loader)

                if self.verbose:
                    print("Iter: {}, Epoch: {}, main_model_loss = {}".format(it, epoch, avg_mean_model_loss))

            # Train the auxiliary model
            for epoch in range(self.config["num_aux_iters"]):
                avg_aux_model_loss = 0.0
                for batch_x, batch_y in dataset_loader:
                    self.aux_model.train()
                    self.main_model.eval()

                    batch_y_pred_log_var_aux = self.aux_model(batch_x)
                    batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
                    aux_loss = self._aux_model_loss(batch_y, batch_y_pred_mu, batch_y_pred_log_var_aux)

                    optimizer_aux_model.zero_grad()
                    aux_loss.backward()
                    optimizer_aux_model.step()

                    avg_aux_model_loss += aux_loss.item() / len(dataset_loader)

                if self.verbose:
                    print("Iter: {}, Epoch: {}, aux_model_loss = {}".format(it, epoch, avg_aux_model_loss))

        return self

    def predict(self, X, return_dists=False):
        """ Obtain predictions for the test points.

        In addition to the mean and lower/upper bounds, also returns the full predictive distribution
        (return_dists=True).

        Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the test points.
            return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.

        Returns:
            namedtuple: A namedtuple that holds

            y_mean: ndarray of shape (n_samples, [n_output_dims])
                Mean of predictive distribution of the test points.
            y_lower: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of predictive distribution of the test points.
            y_upper: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of predictive distribution of the test points.
            dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
                Only returned when `return_dists` is True.
        """
        self.main_model.eval()

        X = torch.from_numpy(X).float().to(self.device)
        dataset_loader = DataLoader(
            X,
            batch_size=self.config["batch_size"]
        )

        y_mean_list = []
        y_log_var_list = []
        for batch_x in dataset_loader:
            batch_y_pred_mu, batch_y_pred_log_var = self.main_model(batch_x)
            y_mean_list.append(batch_y_pred_mu.data.cpu().numpy())
            y_log_var_list.append(batch_y_pred_log_var.data.cpu().numpy())

        y_mean = np.concatenate(y_mean_list)
        y_log_var = np.concatenate(y_log_var_list)
        y_std = np.sqrt(np.exp(y_log_var))
        y_lower = y_mean - 2.0 * y_std
        y_upper = y_mean + 2.0 * y_std

        Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
        res = Result(y_mean, y_lower, y_upper)

        if return_dists:
            dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
            Result = namedtuple('res', Result._fields + ('y_dists',))
            res = Result(*res, y_dists=dists)

        return res
__init__.py
from .auxiliary_interval_predictor import AuxiliaryIntervalPredictor
bnn.py
import copy
from collections import namedtuple

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.utils.data as data_utils
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler

from uq360.algorithms.builtinuq import BuiltinUQ
from uq360.models.bayesian_neural_networks.bnn_models import horseshoe_mlp, bayesian_mlp


class BnnRegression(BuiltinUQ):
    """
    Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for regression.

    References:
        .. [6] Ghosh, Soumya, Jiayu Yao, and Finale Doshi-Velez. "Structured variational learning of Bayesian neural
            networks with horseshoe priors." International Conference on Machine Learning. PMLR, 2018.
    """

    def __init__(self, config, prior="Gaussian"):
        """
        Args:
            config: a dictionary specifying network and learning hyperparameters.
            prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe.
        """
        super(BnnRegression, self).__init__()
        self.config = config
        self.prior = prior  # stored so get_params() can report it
        if prior == "Gaussian":
            self.net = bayesian_mlp.BayesianRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
                                                          num_nodes=config['num_nodes'],
                                                          num_layers=config['num_layers'])
            self.config['use_reg_hshoe'] = None
        elif prior == "Hshoe":
            self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
                                                        num_nodes=config['num_nodes'],
                                                        num_layers=config['num_layers'],
                                                        hshoe_scale=config['hshoe_scale'])
            self.config['use_reg_hshoe'] = False
        elif prior == "RegHshoe":
            self.net = horseshoe_mlp.HshoeRegressionNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
                                                        num_nodes=config['num_nodes'],
                                                        num_layers=config['num_layers'],
                                                        hshoe_scale=config['hshoe_scale'],
                                                        use_reg_hshoe=config['use_reg_hshoe'])
            self.config['use_reg_hshoe'] = True
        else:
            raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")

    def get_params(self, deep=True):
        return {"prior": self.prior, "config": self.config}

    def fit(self, X, y):
        """ Fit the BNN regression model.

        Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the training data.
            y: array-like of shape (n_samples,) or (n_samples, n_targets)
                Target values

        Returns:
            self
        """
        torch.manual_seed(1234)
        optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
        neg_elbo = torch.zeros([self.config['num_epochs'], 1])
        params_store = {}
        for epoch in range(self.config['num_epochs']):
            loss = self.net.neg_elbo(num_batches=1, x=X, y=y.float().unsqueeze(dim=1)) / X.shape[0]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if hasattr(self.net, 'fixed_point_updates'):
                # for hshoe or regularized hshoe nets
                self.net.fixed_point_updates()
            neg_elbo[epoch] = loss.item()
            if (epoch + 1) % 10 == 0:
                # print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
                print('Epoch[{}/{}], neg elbo: {:.6f}, noise var: {:.6f}'
                      .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item() / X.shape[0],
                              self.net.get_noise_var()))
            params_store[epoch] = copy.deepcopy(self.net.state_dict())  # for small nets we can just store all.
        best_model_id = neg_elbo.argmin()  # loss_val_store.argmin() #
        self.net.load_state_dict(params_store[best_model_id.item()])
        return self

    def predict(self, X, mc_samples=100, return_dists=False, return_epistemic=True, return_epistemic_dists=False):
        """ Obtain predictions for the test points.

        In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
        and full predictive distribution (return_dists=True).

        Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the test points.
            mc_samples: Number of Monte-Carlo samples.
            return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
            return_epistemic: if True, the epistemic upper and lower bounds are returned.
            return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
                is returned.

        Returns:
            namedtuple: A namedtuple that holds

            y_mean: ndarray of shape (n_samples, [n_output_dims])
                Mean of predictive distribution of the test points.
            y_lower: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of predictive distribution of the test points.
            y_upper: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of predictive distribution of the test points.
            y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of epistemic component of the predictive distribution of the test points.
                Only returned when `return_epistemic` is True.
            y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of epistemic component of the predictive distribution of the test points.
                Only returned when `return_epistemic` is True.
            dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
                Only returned when `return_dists` is True.
        """
        epistemic_out = np.zeros([mc_samples, X.shape[0]])
        total_out = np.zeros([mc_samples, X.shape[0]])
        for s in np.arange(mc_samples):
            pred = self.net(X).data.numpy().ravel()
            epistemic_out[s] = pred
            total_out[s] = pred + np.sqrt(self.net.get_noise_var()) * np.random.randn(pred.shape[0])
        y_total_std = np.std(total_out, axis=0)
        y_epi_std = np.std(epistemic_out, axis=0)
        y_mean = np.mean(total_out, axis=0)
        y_lower = y_mean - 2 * y_total_std
        y_upper = y_mean + 2 * y_total_std
        y_epi_lower = y_mean - 2 * y_epi_std
        y_epi_upper = y_mean + 2 * y_epi_std

        Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
        res = Result(y_mean, y_lower, y_upper)

        if return_epistemic:
            Result = namedtuple('res', Result._fields + ('lower_epistemic', 'upper_epistemic',))
            res = Result(*res, lower_epistemic=y_epi_lower, upper_epistemic=y_epi_upper)

        if return_dists:
            dists = [norm(loc=y_mean[i], scale=y_total_std[i]) for i in range(y_mean.shape[0])]
            Result = namedtuple('res', Result._fields + ('y_dists',))
            res = Result(*res, y_dists=dists)

        if return_epistemic_dists:
            epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
            Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
            res = Result(*res, y_epistemic_dists=epi_dists)

        return res


class BnnClassification(BuiltinUQ):
    """
    Variationally trained BNNs with Gaussian and Horseshoe [6]_ priors for classification.
    """

    def __init__(self, config, prior="Gaussian", device=None):
        """
        Args:
            config: a dictionary specifying network and learning hyperparameters.
            prior: BNN priors specified as a string. Supported priors are Gaussian, Hshoe, RegHshoe.
            device: device used for pytorch models, ignored otherwise.
        """
        super(BnnClassification, self).__init__()
        self.config = config
        self.device = device
        self.prior = prior  # stored so get_params() can report it
        if prior == "Gaussian":
            self.net = bayesian_mlp.BayesianClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
                                                              num_nodes=config['num_nodes'],
                                                              num_layers=config['num_layers'])
            self.config['use_reg_hshoe'] = None
        elif prior == "Hshoe":
            self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
                                                            num_nodes=config['num_nodes'],
                                                            num_layers=config['num_layers'],
                                                            hshoe_scale=config['hshoe_scale'])
            self.config['use_reg_hshoe'] = False
        elif prior == "RegHshoe":
            self.net = horseshoe_mlp.HshoeClassificationNet(ip_dim=config['ip_dim'], op_dim=config['op_dim'],
                                                            num_nodes=config['num_nodes'],
                                                            num_layers=config['num_layers'],
                                                            hshoe_scale=config['hshoe_scale'],
                                                            use_reg_hshoe=config['use_reg_hshoe'])
            self.config['use_reg_hshoe'] = True
        else:
            raise NotImplementedError("'prior' must be a string. It can be one of Gaussian, Hshoe, RegHshoe")
        if "batch_size" not in self.config:
            self.config["batch_size"] = 50
        self.net = self.net.to(device)

    def get_params(self, deep=True):
        return {"prior": self.prior, "config": self.config, "device": self.device}

    def fit(self, X=None, y=None, train_loader=None):
        """ Fits the BNN classification model.

        Args:
            X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
                Features vectors of the training data or the probability scores from the base model.
                Ignored if train_loader is not None.
            y: array-like of shape (n_samples,) or (n_samples, n_targets)
                Target values. Ignored if train_loader is not None.
            train_loader: pytorch train_loader object.

        Returns:
            self
        """
        if train_loader is None:
            train = data_utils.TensorDataset(torch.Tensor(X), torch.Tensor(y.values).long())
            train_loader = data_utils.DataLoader(train, batch_size=self.config['batch_size'], shuffle=True)
        torch.manual_seed(1234)
        optimizer = torch.optim.Adam(self.net.parameters(), lr=self.config['step_size'])
        neg_elbo = torch.zeros([self.config['num_epochs'], 1])
        params_store = {}
        for epoch in range(self.config['num_epochs']):
            avg_loss = 0.0
            for batch_x, batch_y in train_loader:
                loss = self.net.neg_elbo(num_batches=len(train_loader), x=batch_x, y=batch_y) / batch_x.size(0)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                if hasattr(self.net, 'fixed_point_updates'):
                    # for hshoe or regularized hshoe nets
                    self.net.fixed_point_updates()
                avg_loss += loss.item()
            neg_elbo[epoch] = avg_loss / len(train_loader)
            if (epoch + 1) % 10 == 0:
                # print ((net.noise_layer.bhat/net.noise_layer.ahat).data.numpy()[0])
                print('Epoch[{}/{}], neg elbo: {:.6f}'
                      .format(epoch + 1, self.config['num_epochs'], neg_elbo[epoch].item()))
            params_store[epoch] = copy.deepcopy(self.net.state_dict())  # for small nets we can just store all.
        best_model_id = neg_elbo.argmin()  # loss_val_store.argmin() #
        self.net.load_state_dict(params_store[best_model_id.item()])
        return self

    def predict(self, X, mc_samples=100):
        """ Obtain calibrated predictions for the test points.

        Args:
            X: array-like of shape (n_samples, n_features) or (n_samples, n_classes).
                Features vectors of the test points or the probability scores from the base model.
            mc_samples: Number of Monte-Carlo samples.

        Returns:
            namedtuple: A namedtuple that holds

            y_pred: ndarray of shape (n_samples,)
                Predicted labels of the test points.
            y_prob: ndarray of shape (n_samples, n_classes)
                Predicted probability scores of the classes.
            y_prob_var: ndarray of shape (n_samples,)
                Variance of the prediction on the test points.
            y_prob_samples: ndarray of shape (mc_samples, n_samples, n_classes)
                Samples from the predictive distribution.
        """
        X = torch.Tensor(X)
        y_prob_samples = [F.softmax(self.net(X), dim=1).detach().numpy() for _ in np.arange(mc_samples)]

        y_prob_samples_stacked = np.stack(y_prob_samples)
        prob_mean = np.mean(y_prob_samples_stacked, 0)
        prob_var = np.std(y_prob_samples_stacked, 0) ** 2

        if len(np.shape(prob_mean)) == 1:
            y_pred_labels = prob_mean > 0.5
        else:
            y_pred_labels = np.argmax(prob_mean, axis=1)

        Result = namedtuple('res', ['y_pred', 'y_prob', 'y_prob_var', 'y_prob_samples'])
        res = Result(y_pred_labels, prob_mean, prob_var, y_prob_samples)

        return res
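An illustrative configuration and call sequence follows. The config keys come from the constructors above, but the values and the simulated data are placeholders. Note two interface assumptions visible in the code: `BnnRegression.fit` and `predict` operate on torch tensors directly, and `BnnClassification.fit` expects `y` to have a `.values` attribute (for example a pandas Series) when no `train_loader` is given.

import torch

config = {
    "ip_dim": 5,          # input dimension
    "op_dim": 1,          # output dimension
    "num_nodes": 50,      # hidden units per layer
    "num_layers": 2,      # number of hidden layers
    "step_size": 1e-3,    # Adam learning rate
    "num_epochs": 100,    # training epochs
    # "hshoe_scale": 1e-1,   # only needed for the Hshoe prior
    # "use_reg_hshoe": True, # only needed for the RegHshoe prior
}

X_train = torch.randn(128, config["ip_dim"])
y_train = torch.randn(128)

# assumes BnnRegression (defined above) is importable from this module
bnn = BnnRegression(config=config, prior="Gaussian")
bnn.fit(X_train, y_train)
res = bnn.predict(X_train[:10], mc_samples=50)
print(res.y_mean[:3], res.y_lower[:3], res.y_upper[:3])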
__init__.py
null
homoscedastic_gaussian_process_regression.py
from collections import namedtuple

import botorch
import gpytorch
import numpy as np
import torch
from botorch.models import SingleTaskGP
from botorch.utils.transforms import normalize
from gpytorch.constraints import GreaterThan
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler

from uq360.algorithms.builtinuq import BuiltinUQ

np.random.seed(42)
torch.manual_seed(42)


class HomoscedasticGPRegression(BuiltinUQ):
    """ A wrapper around BoTorch SingleTaskGP Gaussian Process Regression [1]_ with homoscedastic noise.

    References:
        .. [1] https://botorch.org/api/models.html#singletaskgp
    """

    def __init__(self,
                 kernel=gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
                 likelihood=None,
                 config=None):
        """
        Args:
            kernel: gpytorch kernel function with default set to `RBFKernel` with output scale.
            likelihood: gpytorch likelihood function with default set to `GaussianLikelihood`.
            config: dictionary containing the config parameters for the model.
        """
        super(HomoscedasticGPRegression, self).__init__()
        self.config = config
        self.kernel = kernel
        self.likelihood = likelihood
        self.model = None
        self.scaler = StandardScaler()
        self.X_bounds = None

    def get_params(self, deep=True):
        return {"kernel": self.kernel, "likelihood": self.likelihood, "config": self.config}

    def fit(self, X, y, **kwargs):
        """ Fit the GP Regression model.

        Additional arguments relevant for SingleTaskGP fitting can be passed to this function.

        Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the training data.
            y: array-like of shape (n_samples,) or (n_samples, n_targets)
                Target values
            **kwargs: Additional arguments relevant for SingleTaskGP fitting.

        Returns:
            self
        """
        y = self.scaler.fit_transform(y)
        X, y = torch.tensor(X), torch.tensor(y)
        self.X_bounds = X_bounds = torch.stack([X.min() * torch.ones(X.shape[1]),
                                                X.max() * torch.ones(X.shape[1])])

        X = normalize(X, X_bounds)

        model_homo = SingleTaskGP(train_X=X, train_Y=y, covar_module=self.kernel,
                                  likelihood=self.likelihood, **kwargs)
        model_homo.likelihood.noise_covar.register_constraint("raw_noise", GreaterThan(1e-5))
        model_homo_marginal_log_lik = gpytorch.mlls.ExactMarginalLogLikelihood(model_homo.likelihood, model_homo)
        botorch.fit.fit_gpytorch_model(model_homo_marginal_log_lik)
        model_homo_marginal_log_lik.eval()

        self.model = model_homo_marginal_log_lik
        self.inferred_observation_noise = self.scaler.inverse_transform(
            self.model.likelihood.noise.detach().numpy()[0].reshape(1, 1)).squeeze()

        return self

    def predict(self, X, return_dists=False, return_epistemic=False, return_epistemic_dists=False):
        """ Obtain predictions for the test points.

        In addition to the mean and lower/upper bounds, also returns epistemic uncertainty (return_epistemic=True)
        and full predictive distribution (return_dists=True).

        Args:
            X: array-like of shape (n_samples, n_features).
                Features vectors of the test points.
            return_dists: If True, the predictive distribution for each instance using scipy distributions is returned.
            return_epistemic: if True, the epistemic upper and lower bounds are returned.
            return_epistemic_dists: If True, the epistemic distribution for each instance using scipy distributions
                is returned.

        Returns:
            namedtuple: A namedtuple that holds

            y_mean: ndarray of shape (n_samples, [n_output_dims])
                Mean of predictive distribution of the test points.
            y_lower: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of predictive distribution of the test points.
            y_upper: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of predictive distribution of the test points.
            y_lower_epistemic: ndarray of shape (n_samples, [n_output_dims])
                Lower quantile of epistemic component of the predictive distribution of the test points.
                Only returned when `return_epistemic` is True.
            y_upper_epistemic: ndarray of shape (n_samples, [n_output_dims])
                Upper quantile of epistemic component of the predictive distribution of the test points.
                Only returned when `return_epistemic` is True.
            dists: list of predictive distribution as `scipy.stats` objects with length n_samples.
                Only returned when `return_dists` is True.
        """
        X = torch.tensor(X)
        X_test_norm = normalize(X, self.X_bounds)

        self.model.eval()
        with torch.no_grad():
            posterior = self.model.model.posterior(X_test_norm)
            y_mean = posterior.mean
            # y_epi_std = torch.sqrt(posterior.variance)
            y_lower_epistemic, y_upper_epistemic = posterior.mvn.confidence_region()

            predictive_posterior = self.model.model.posterior(X_test_norm, observation_noise=True)
            # y_std = torch.sqrt(predictive_posterior.variance)
            y_lower_total, y_upper_total = predictive_posterior.mvn.confidence_region()

        y_mean, y_lower, y_upper, y_lower_epistemic, y_upper_epistemic = \
            self.scaler.inverse_transform(y_mean.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_lower_total.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_upper_total.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_lower_epistemic.numpy()).squeeze(), \
            self.scaler.inverse_transform(y_upper_epistemic.numpy()).squeeze()

        y_epi_std = (y_upper_epistemic - y_lower_epistemic) / 4.0
        y_std = (y_upper_total - y_lower_total) / 4.0

        Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
        res = Result(y_mean, y_lower, y_upper)

        if return_epistemic:
            Result = namedtuple('res', Result._fields + ('y_lower_epistemic', 'y_upper_epistemic',))
            res = Result(*res, y_lower_epistemic=y_lower_epistemic, y_upper_epistemic=y_upper_epistemic)

        if return_dists:
            dists = [norm(loc=y_mean[i], scale=y_std[i]) for i in range(y_mean.shape[0])]
            Result = namedtuple('res', Result._fields + ('y_dists',))
            res = Result(*res, y_dists=dists)

        if return_epistemic_dists:
            epi_dists = [norm(loc=y_mean[i], scale=y_epi_std[i]) for i in range(y_mean.shape[0])]
            Result = namedtuple('res', Result._fields + ('y_epistemic_dists',))
            res = Result(*res, y_epistemic_dists=epi_dists)

        return res
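A minimal usage sketch follows, with simulated data only for illustration. Because `fit` passes the target through `StandardScaler` and `SingleTaskGP`, `y` is assumed to be 2-D with shape (n_samples, 1).

import numpy as np

# assumes HomoscedasticGPRegression (defined above) is importable from this module
X_train = np.random.uniform(-3, 3, size=(100, 2))
y_train = (np.sin(X_train[:, 0]) + 0.1 * np.random.randn(100)).reshape(-1, 1)

gp = HomoscedasticGPRegression()
gp.fit(X_train, y_train)
print("inferred observation noise:", gp.inferred_observation_noise)

res = gp.predict(X_train[:5], return_epistemic=True)
print(res.y_mean, res.y_lower, res.y_upper)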