import numpy as np
import pandas as pd
import pyAgrum as gum
from sklearn.preprocessing import StandardScaler
# Load the Excel survey data (pd.read_excel needs an Excel engine such as openpyxl for .xlsx files).
data = pd.read_excel('./Final_BN_0323.xlsx')
data.columns = data.columns.astype(str)
for column in data.columns:
    print(column)
# Mark missing values with -1 (pyAgrum will later be told to treat "-1" as the missing symbol).
# data = data.fillna(-1)
# Blank cells read in as ' ' strings are mapped to -1 here; remaining NaNs are handled
# per column below (fillna(-1) for numerical columns, cat.codes -> -1 for categorical ones).
data = data.replace(' ', -1)
for col in data.columns:
    if data[col].dtype == np.float64:
        print(f'{col} is numerical')
        if col in ['satisfaction_infrastructure', 'satisfaction_facility', 'satisfaction_nature', 'satisfaction_service']:
            scaler = StandardScaler()
            # Mask the rows where data[col] is not missing (-1)
            mask = data[col] != -1
            # StandardScaler expects a 2D array, so reshape before scaling
            # and flatten the result back to 1D for the assignment
            data.loc[mask, col] = scaler.fit_transform(
                data.loc[mask, col].values.reshape(-1, 1)
            ).ravel()
        # Discretise the observed (non-missing) values into 5 equal-width bins
        data.loc[data[col] != -1, col] = pd.cut(data.loc[data[col] != -1, col], bins=5, labels=False)
        data[col] = data[col].fillna(-1).astype(int)
    else:
        print(f'{col} is categorical')
        # Convert the column to a categorical type.
        # Missing values (NaN) are kept, and cat.codes assigns them a code of -1.
        data[col] = data[col].astype('category')
        # Replace the original column with its integer codes
        data[col] = data[col].cat.codes
print(data.columns)
for column in data.columns:
    print(f'{column}: {data[column].unique()}')
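# Optional quick check (not in the original script): share of missing (-1) codes
# per column after encoding, highest first.
print((data == -1).mean().sort_values(ascending=False).head(10))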
# from edges import structure
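# structure1 below is an earlier draft of the edge list; only `structure` (defined
# further down) is used when building the network.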
structure1 = [
#visitor demographics to travel characteristics
('gender', 'travel motivation'),
('residential status', 'travel motivation'),
('residential status', 'duration'),
('residential status', 'travel group'),
('nationality', 'travel motivation'),
('nationality', 'duration'),
('nationality', 'travel group'),
#travel characteristics to satisfaction level to park attributes
('travel motivation', 'Satisfaction_nature'),
('duration', 'Satisfaction_service'),
('duration', 'Satisfaction_nature'),
('travel group', 'Satisfaction_nature'),
#performed activity to satisfaction level to park attributes
('nature_based_activity', 'Satisfaction_service'),
('nature_based_activity', 'Satisfaction_nature'),
('facility_based_activity', 'Satisfaction_service'),
('facility_based_activity', 'Satisfaction_nature'),
('human_in_landmark', 'Satisfaction_service'),
('human_in_landmark', 'Satisfaction_nature'),
#perceived importance to park attribute to satisfaction level to park attributes
('importance to infrastructure', 'Satisfaction_infrastructure'),
('importance to service', 'Satisfaction_service'),
('importance to nature', 'Satisfaction_service'),
#satisfaction level to park attributes to overall satisfaction
('Satisfaction_infrastructure', 'overall satisfaction level'),
('Satisfaction_service', 'overall satisfaction level'),
('Satisfaction_nature', 'overall satisfaction level')
]
structure = [
#year to visitor demographics
('Year', 'gender'),
('Year', 'age'),
('Year', 'residential status'),
('Year', 'nationality'),
('Year', 'educational level'),
('Year', 'employment status'),
('Year', 'income level'),
('Year', 'preferred language'),
#national park to visitor demographics
('park', 'gender'),
('park', 'age'),
('park', 'residential status'),
('park', 'nationality'),
('park', 'educational level'),
('park', 'employment status'),
('park', 'income level'),
('park', 'preferred language'),
#visitor demographics to satisfaction level to park attributes
('gender', 'satisfaction_infrastructure'),
('gender', 'satisfaction_service'),
('gender', 'satisfaction_nature'),
('gender', 'satisfaction_facility'),
('age', 'satisfaction_infrastructure'),
('age', 'satisfaction_service'),
('age', 'satisfaction_nature'),
('age', 'satisfaction_facility'),
('residential status', 'satisfaction_service'),
('residential status', 'satisfaction_nature'),
('residential status', 'satisfaction_facility'),
('nationality', 'satisfaction_infrastructure'),
('nationality', 'satisfaction_service'),
('nationality', 'satisfaction_nature'),
('educational level', 'satisfaction_service'),
('educational level', 'satisfaction_facility'),
('employment status', 'satisfaction_nature'),
('income level', 'satisfaction_infrastructure'),
('income level', 'satisfaction_service'),
('income level', 'satisfaction_facility'),
('preferred language', 'satisfaction_infrastructure'),
('preferred language', 'satisfaction_service'),
('preferred language', 'satisfaction_nature'),
('preferred language', 'satisfaction_facility'),
#travel characteristics to satisfaction level to park attributes
('source of knowing', 'satisfaction_service'),
('travel motivation', 'satisfaction_service'),
('means of transportation', 'satisfaction_service'),
('means of transportation', 'satisfaction_nature'),
('duration', 'satisfaction_infrastructure'),
('duration', 'satisfaction_service'),
('duration', 'satisfaction_nature'),
('travel group', 'satisfaction_infrastructure'),
('travel group', 'satisfaction_facility'),
#performed activity to satisfaction level to park attributes
('nature_based_activity', 'satisfaction_infrastructure'),
('nature_based_activity', 'satisfaction_service'),
('nature_based_activity', 'satisfaction_facility'),
('facility_based_activity', 'satisfaction_nature'),
('human_in_landmark', 'satisfaction_nature'),
('human_in_landmark', 'satisfaction_facility'),
#perceived importance to park attribute to satisfaction level to park attributes
('importance_infrastructure', 'satisfaction_infrastructure'),
('importance_service', 'satisfaction_service'),
('importance_nature', 'satisfaction_nature'),
('importance_facility', 'satisfaction_facility'),
#satisfaction level to park attributes to overall satisfaction
('satisfaction_infrastructure', 'overall satisfaction level'),
('satisfaction_service', 'overall satisfaction level'),
('satisfaction_nature', 'overall satisfaction level'),
('satisfaction_facility', 'overall satisfaction level'),
#overall satisfaction level to visitor loyalty
('overall satisfaction level', 'Intention to return'),
('overall satisfaction level', 'Intention to recommend'),
]
print(len(structure))
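# Optional sanity check (not in the original script): every edge endpoint in
# `structure` must match a column name exactly; this catches case or spelling
# mismatches such as the capitalised names used in structure1.
missing_cols = {n for edge in structure for n in edge} - set(data.columns)
if missing_cols:
    raise KeyError(f'structure references columns not present in the data: {missing_cols}')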
nodes = set()
for edge in structure:
    nodes.add(edge[0])
    nodes.add(edge[1])
data_subset = data[list(nodes)]
# data_subset = data_subset.sample(n=5000).reset_index(drop=True)
data_path = './data.csv'
data_subset.to_csv(data_path, index=False)
df = pd.read_csv(data_path)
# unique_labels = {col: list(map(str, df[col].dropna().unique())) for col in df.columns}
# Observed integer states per column, excluding the -1 missing marker
# (sorted so each variable gets a deterministic, increasing state order).
unique_labels = {col: sorted(int(i) for i in df[col].dropna().unique() if i != -1) for col in df.columns}
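# Optional sanity check (assumption: a discrete BN variable needs at least two
# observed states to be meaningful); catches constant or all-missing columns.
for col, labs in unique_labels.items():
    if len(labs) < 2:
        raise ValueError(f'column {col!r} has fewer than two observed states: {labs}')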
# Create a new Bayesian network
bn = gum.BayesNet("MyBN")
# Add variables to the BN using the inferred labels.
# Each variable will have a domain size equal to the number of unique labels.
for var, labs in unique_labels.items():
    # Add the variable with one state per observed value.
    # var_id = bn.add(gum.LabelizedVariable(var, var, len(labs)))
    var_id = bn.add(gum.IntegerVariable(var, var, labs))
print(f'Added {bn.size()} variables to the network')
# Add the arcs defined by the structure list.
cnt = 0
for parent, child in structure:
    bn.addArc(bn.idFromName(parent), bn.idFromName(child))
    cnt += 1
print(f'Added {cnt} arcs to the network')
# Create the learner: the template BN fixes the variables and their modalities,
# and "-1" is declared as the missing-value symbol.
learner = gum.BNLearner(data_path, bn, ["-1"])
learner.setVerbosity(True)
learner.setNumberOfThreads(20)
learner.setMaxTime(604800)   # one week, in seconds
learner.useEM(1e-3)          # EM for the missing data, convergence threshold 1e-3
learner.useSmoothingPrior()  # smoothing prior to avoid zero counts
print(learner)
# Learn the CPTs for the fixed DAG (only parameters are learned, not the structure).
bn = learner.learnParameters(bn.dag())
print(f"# iterations : {learner.nbrIterations()}")
# Save to BIF; allowModificationWhenSaving lets pyAgrum adjust variable names
# (e.g. names containing spaces) so they are valid BIF identifiers.
gum.saveBN(bn, "bn_final.bif", allowModificationWhenSaving=True)
import pyAgrum.lib.image as gumimage
gumimage.exportInference(bn, "./bn_final.png")
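# pyAgrum.lib.image also provides export() for the bare DAG without the inference
# overlay (both functions rely on Graphviz being installed); for example:
# gumimage.export(bn, "./bn_final_dag.png")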
print(learner.history())  # EM convergence history (available because verbosity is on)
a = 1