"""""""""""""""""""""""""""""""""
This file is for modifying.
Do not run this file.
For running: RegressorTest.py
For modifying: Settings.py
"""""""""""""""""""""""""""""""""
import numpy as np
import tensorflow as tf
"""""""""""""""""""""""""""
Settings that can change
"""""""""""""""""""""""""""
# Depth of the expression tree; deeper trees allow more complex formulas
n_tree_layers = 3
# Allowable operators for tree nodes
# function_set = ['id', 'mul', 'sqrt', 'sin', 'div', 'log']
function_set = ["id", "mul", "sin", "sqrt"]
# Dimensionality of the inputs and output
num_features = 1
num_dims_per_feature = 1
n_dims_in_output = 1
# Input range used for training; the test range doubles it so extrapolation is also evaluated
train_scope = [0, 5]
test_scope = [y * 2 for y in train_scope]
# Number of repeated training runs and training steps per run
num_train_repeat_processes = 5
num_train_steps_in_repeat_mode = 8000
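# With the defaults above, the expression tree has 2 ** n_tree_layers - 1 = 7
# operator nodes (the length of initialize_ops further below), training inputs
# are drawn from [0, 5], and testing uses the doubled range [0, 10].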
"""""""""""""""""""""""""""
Display and log settings
"""""""""""""""""""""""""""
show_output = False
keep_logs = False
output_freq = 49000
plot_frequency = 500000
save_all_formulas = False
max_formula_output_length = 400
"""""""""""""""""""""""""""
Tree settings
"""""""""""""""""""""""""""
use_both_for_unary = True
non_const = False
use_leaf_sm = False
"""""""""""""""""""""""""""
Domain parameters
"""""""""""""""""""""""""""
# Which Fokker-Planck example to use; 0 keeps the default problem defined in implicit_function below
fpe_example = 0
max_x = np.pi
train_scope2 = [0, 5]
test_scope2 = test_scope.copy()  # equals [0, 10] with the default train_scope
avoid_zero = False
# if fpe_example == 4:
# avoid_zero = True
"""""""""""""""""""""""""""
Define the ODE here
"""""""""""""""""""""""""""
# options: mode = "sr" (symbolic regression), "de" (differential equation), "lr"
mode = "de"
# This is the "g" function that defines the ODE problem. full_x holds the
# sample points (indexed below as full_x[:, 0, k] for the k-th input dimension),
# y is the candidate solution, and y_p / y_pp are its first and second partial
# derivatives with respect to the input dimensions.
def implicit_function(full_x, y, y_p, y_pp):
    # Implicit function is 0 if we are doing symbolic regression
    if mode == "sr":
        return y * 0.0
    y_p1 = y_p[0]
    y_p2 = y_p[1]
    y_p3 = y_p[2]
    y_pp1 = y_pp[0]
    y_pp2 = y_pp[1]
    y_pp12 = y_pp[2]
    # First input dimension (x) and last input dimension (t)
    x = tf.reshape(full_x[:, 0, 0], [-1, 1, 1])
    t = tf.reshape(full_x[:, 0, -1], [-1, 1, 1])
    if num_features > 1:
        # Second input dimension (w), used by Fokker-Planck example 5 below
        w = tf.reshape(full_x[:, 0, 1], [-1, 1, 1])
    ret_val = None
    """ Lane-Emden Equation """
    # emden_m = 0
    # ret_val = y_pp1 + 2.0 * tf.math.divide_no_nan(y_p1, x)
    # ret_val += y ** emden_m
    """ Bell curve integral """
    # ret_val = tf.math.exp(-1.0 * tf.square(x)) - y_p1
    """ One dimensional wave equation """
    # c = 1.0
    # ret_val = y_pp2 - c**2 * y_pp1
    """ One dimensional heat equation """
    # c = 1.0
    # ret_val = y_p2 - c**2 * y_pp1 - tf.math.cos(x)
    """ Inhomogeneous wave equation """
    # ret_val = y_pp1 - y_pp2 - 2
    """ Two dimensional Laplace equation """
    # ret_val = y_pp2 + y_pp1
    # Default problem: y' = 2x, whose solutions are y = x**2 + C
    ret_val = y_p1 - 2 * x
""" FP Eqn """
if fpe_example == 1:
# Example 1
a = -1.0
a_p = 0.0
b = 1.0
b_p = 0.0
b_pp = 0.0
elif fpe_example == 2:
# Example 2
a = x
a_p = 1.0
b = tf.math.square(x) / 2
b_p = x
b_pp = 1.0
elif fpe_example == 3:
# Example 3
a = -1.0 - x
a_p = -1.0
b = tf.multiply(x ** 2, tf.math.exp(t))
b_p = 2 * x * tf.math.exp(t)
b_pp = 2 * tf.math.exp(t)
elif fpe_example == 4:
# Example 4
a = 4.0 * tf.math.divide_no_nan(y, x) - x / 3.0
a_p = 4.0 * (tf.math.divide_no_nan(y_p1, x) - tf.math.divide_no_nan(y, x ** 2)) - 1.0 / 3
b = y
b_p = y_p1
b_pp = y_pp1
elif fpe_example == 6:
# Example 5
a = 0.0
a_p = 0.0
b = 0.5
b_p = 0.0
b_pp = 0.0
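    # Why t1 + t2 + t3 below is the right-hand side of the Fokker-Planck
    # equation u_t = -d/dx[a * u] + d^2/dx^2[b * u]: expanding both derivatives
    # with the product rule gives
    #     -(a * u)_x = -a_x * u - a * u_x
    #     (b * u)_xx = b_xx * u + 2 * b_x * u_x + b * u_xx
    # and collecting terms by u, u_x and u_xx yields
    #     u * (b_xx - a_x) + u_x * (2 * b_x - a) + u_xx * b,
    # which is exactly t1 + t2 + t3 with u = y, u_x = y_p1, u_xx = y_pp1.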
    if fpe_example in [1, 2, 3, 4, 6]:
        # Residual of the Fokker-Planck equation in expanded form (see derivation above)
        t1 = tf.multiply(y, b_pp - a_p)
        t2 = tf.multiply(y_p1, 2 * b_p - a)
        t3 = tf.multiply(y_pp1, b)
        # print("a: {}".format(a.shape))
        # print("a_p: {}".format(a_p.shape))
        # print("b: {}".format(b.shape))
        # print("b_p: {}".format(b_p.shape))
        # print("b_pp: {}".format(b_pp.shape))
        # print("t1: {}".format(t1.shape))
        # print("t2: {}".format(t2.shape))
        # print("t3: {}".format(t3.shape))
        ret_val = y_p2 - (t1 + t2 + t3)
    if fpe_example == 3:
        ret_val = y_p2 - ((x + 1) * y_p1 + x ** 2 * tf.math.exp(t) * y_pp1)
    if fpe_example == 4:
        t1 = y * (y_pp1 * x ** 2 - 4 * y_p1 * x + 4 * y + x * x / 3.0)
        t2 = y_p1 * (2 * x * y_p1 - 4 * x * y + x * x * x / 3.0)
        t3 = x ** 2 * y_pp1 * y
        ret_val = y_p2 * x * x - (t1 + t2 + t3)
    if fpe_example == 5:
        # Example 5 uses the second input dimension w, so num_features must be > 1
        ret_val = y_p3 - (-2 * y + 3 * x * y_p1 - w * y_p2 + x ** 2 * y_pp1 + w ** 2 * y_pp2 + 2 * y_pp12)
    return ret_val
"""""""""""""""""""""""""""
Initial values
"""""""""""""""""""""""""""
# initialize_ops are given in bottom-up order.
initialize_ops = np.zeros([2 ** n_tree_layers - 1])
# initialize_ops = ["mul", "mul", "id"]
# if fpe_example in [1, 2, 3, 5]:
# initialize_ops = ["id", "exp", "mul"]
# elif fpe_example in [4]:
# initialize_ops = ["mul", "exp", "mul"]
# initialize_ops = ["exp", "sin", "mul"]
#
# Domain bounds for the boundary/initial-condition points (max_x is defined above)
min_x = 0
max_t = 5
min_t = 0
# Number of boundary-condition points per input dimension
n_bc_points = 5
# Initial values for (x, y)
fixed_x = []
fixed_y = []
# for i in range(n_bc_points):
# t_i = i * (max_t - min_t)/n_bc_points + min_t
# fixed_x.append([0, t_i])
# # fixed_y.append(0)
# fixed_y.append(1 - np.exp(-1 * t_i))
# fixed_x.append([np.pi, t_i])
# fixed_y.append(np.exp(-1 * t_i) - 1)
# fixed_y.append(0)
# Initial values for (x, y')
fixed_x_p1 = []
fixed_y_p1 = []
fixed_x_p2 = []
fixed_y_p2 = []
if mode == "de":
    if fpe_example != 5:
        for i in range(n_bc_points):
            x_i = i * (max_x - min_x) / (n_bc_points - 1) + min_x
            fixed_x.append([x_i, 0])
            if fpe_example in [1, 2, 5]:
                fixed_y.append(x_i)
            elif fpe_example == 3:
                fixed_y.append(x_i + 1)
            elif fpe_example == 4:
                fixed_y.append(x_i ** 2)
            # fixed_y.append(0)
            # fixed_y.append(x_i + np.cos(x_i))
            # fixed_x_p2.append([x_i, 0])
            # fixed_y_p2.append(np.sin(x_i))
    if fpe_example == 5:
        for i in range(n_bc_points):
            x_i = i * (max_x - min_x) / (n_bc_points - 1) + min_x
            for j in range(n_bc_points):
                w_j = j * (max_x - min_x) / (n_bc_points - 1) + min_x
                fixed_x.append([x_i, w_j, 0])
                fixed_y.append(x_i)
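# With the defaults (min_x = 0, max_x = pi, n_bc_points = 5), the grid built
# above is x_i = i * pi / 4 for i = 0..4, i.e. {0, pi/4, pi/2, 3*pi/4, pi},
# and each grid point is paired with a final coordinate of 0 (the time value).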
# print("IVP (x, y):\n{}".format([(fixed_x[i], fixed_y[i]) for i in range(len(fixed_x))]))
# print("IVP (x, y_p1):\n{}".format([(fixed_x_p1[i], fixed_y_p1[i]) for i in range(len(fixed_x_p1))]))
# print("IVP (x, y_p2):\n{}".format([(fixed_x_p2[i], fixed_y_p2[i]) for i in range(len(fixed_x_p2))]))
# Weight to give IVP error
ivp_lambda = 10
"""""""""""""""""""""""""""
Training hyperparameters
"""""""""""""""""""""""""""
quick_train_fraction = 0.7
# Probably don't need to change any of the ones below
max_training_batch_size = 1000
t1_fraction = 5 / 20
t2_fraction = 15 / 20
train_N = 5000
test_N = 1000
eps = 1e-4
big_eps = 1e-3
d_eps = 2.0e-2
learn_rate = 0.001
w_matrix_stddev = 0.1
init_weight_value = 5
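
# ---------------------------------------------------------------------------
# Optional sanity check (a minimal sketch, not part of the training pipeline;
# it assumes TensorFlow 2 eager execution and the [batch, 1, 1] tensor shapes
# implied by the reshapes in implicit_function). For the default problem
# g = y' - 2x, the candidate solution y = x**2 should give a residual of ~0.
# Guarded so that importing this settings module stays side-effect free.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _x = tf.reshape(tf.linspace(0.0, 5.0, 11), [-1, 1, 1])
    _zero = tf.zeros_like(_x)
    _y = _x ** 2                               # candidate solution y = x**2
    _y_p = [2.0 * _x, _zero, _zero]            # y' = 2x; unused derivative slots are 0
    _y_pp = [2.0 * tf.ones_like(_x), _zero, _zero]
    _residual = implicit_function(_x, _y, _y_p, _y_pp)
    print("max |g| over [0, 5]:", float(tf.reduce_max(tf.abs(_residual))))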