prompt
stringlengths 174
59.5k
| completion
stringlengths 7
228
| api
stringlengths 12
64
|
---|---|---|
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return `obj.attr` with missing items filled in from `defaults`; return
    `defaults` itself when the attribute does not exist.

    Parameters
    ----------
    obj : object
        Any object, typically an options Struct.
    attr : str
        The name of the attribute to look up.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values instead of to the value itself.

    Returns
    -------
    values : dict
        The attribute value updated with defaults, or `defaults`.
    """
    try:
        values = getattr(obj, attr)

    except AttributeError:
        # Only a missing attribute means "use the defaults" - do not mask
        # unrelated errors as a bare `except:` would.
        values = defaults

    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)

        else:
            set_defaults(values, defaults)

    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    Parameters
    ----------
    filename : str
        The name of the output file.
    logs : Struct
        The raw band gap logs with a `freqs` attribute (list of frequency
        arrays) and a `to_dict()` method.
    """
    out = {}

    # Cumulative offsets of the per-interval logs in the concatenated arrays.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)

    # nm.savez() writes binary data - the file must be opened in binary
    # mode ('wb'), not text mode ('w'), which fails on Python 3.
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data array and compute the
    common (min, max) plot range over the transformed data.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])

    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        tdata = data.copy()
        if fun is not None:
            tdata = fun(tdata, *plot_transform[1:])

        dmin = min(dmin, nm.nanmin(tdata))
        dmax = max(dmax, nm.nanmax(tdata))
        tdatas.append(tdata)

    # Guarantee a non-degenerate range even for constant data.
    dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)

    return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`:
    resonances : red, masked resonances : dotted red.
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    line_ok = line_masked = None
    for flag, freq in zip(valid, freq_range):
        xx = [freq, freq]
        if flag:
            line_ok = ax.plot(xx, plot_range, **plot_rsc['resonance'])[0]

        else:
            line_masked = ax.plot(xx, plot_range, **plot_rsc['masked'])[0]

    # Label only the last drawn line of each kind.
    if line_ok:
        line_ok.set_label(plot_labels['resonance'])
    if line_masked:
        line_masked.set_label(plot_labels['masked'])

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigs of a mass matrix.
    """
    if plt is None: return

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)

    lmax = lmin = lmid = None
    for xs, log in zip(freqs, logs):
        n_col = log.shape[1]
        lmax = ax.plot(xs, log[:, -1], **plot_rsc['eig_max'])
        lmin = (ax.plot(xs, log[:, 0], **plot_rsc['eig_min'])
                if n_col >= 2 else None)
        lmid = (ax.plot(xs, log[:, 1], **plot_rsc['eig_mid'])
                if n_col == 3 else None)

    # Label only the last drawn line of each kind.
    lmax[0].set_label(plot_labels['eig_max'])
    if lmin:
        lmin[0].set_label(plot_labels['eig_min'])
    if lmid:
        lmid[0].set_label(plot_labels['eig_mid'])

    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])

    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])

    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)

    if show_legend:
        ax.legend()

    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot single band gap frequency ranges as rectangles.
    """
    def draw_rect(ax, x, y, rsc):
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)

    # Colors.
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']

    # Map band gap kind to the ordered rectangle styles, one per range.
    styles = {
        'p' : (propagation,),
        'w' : (weak,),
        'wp' : (weak, propagation),
        's' : (strong,),
        'sw' : (strong, weak),
        'swp' : (strong, weak, propagation),
        'is' : (strong,),
        'iw' : (weak,),
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)

    for irange, rsc in zip(ranges, styles[kind]):
        draw_rect(ax, irange, plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
              plot_range, show=False, clear=False, new_axes=False):
    """
    Plot band gaps as rectangles.
    """
    if plt is None: return

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    for ii in range(len(freq_range) - 1):
        f0, f1 = freq_range[[ii, ii+1]]
        gap, ranges = gaps[ii], gap_ranges[ii]

        # Normalize the single-gap case to a list of (range, kind, gap)
        # triples so that both cases share one drawing loop.
        if isinstance(gap, list):
            items = list(zip(ranges, kinds[ii], gap))

        else:
            items = [(ranges, kinds[ii], gap)]

        for rng, (kind, kind_desc), (gmin, gmax) in items:
            plot_gap(ax, rng, kind, kind_desc, plot_range, plot_rsc)

            output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
            output(' -> %s\n %s' %(kind_desc, rng))

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
    """
    Construct the complete name of figure file.
    """
    # The part of `key` beyond the common prefix distinguishes the figures.
    suffix = key.replace(common, '')
    if suffix and not suffix.startswith('_'):
        suffix = '_' + suffix

    return op.join(output_dir, fig_name + suffix + fig_suffix)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
@staticmethod
def process_options(options):
    """
    Application options setup. Sets default values for missing
    non-compulsory options.

    Parameters
    ----------
    options : Struct
        The user-supplied application options.

    Returns
    -------
    Struct
        The processed options: plot labels/resources, figure names and
        transforms, with defaults filled in where the user gave none.
    """
    get = options.get

    default_plot_options = {'show' : True,'legend' : False,}

    # Default labels for the band gaps figure.
    aux = {
        'resonance' : 'eigenfrequencies',
        'masked' : 'masked eigenfrequencies',
        'eig_min' : r'min eig($M^*$)',
        'eig_mid' : r'mid eig($M^*$)',
        'eig_max' : r'max eig($M^*$)',
        'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
        'y_axis' : r'eigenvalues of mass matrix $M^*$',
    }
    plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)

    # Default labels for the polarization angles figure.
    aux = {
        'resonance' : 'eigenfrequencies',
        'masked' : 'masked eigenfrequencies',
        'eig_min' : r'$\kappa$(min)',
        'eig_mid' : r'$\kappa$(mid)',
        'eig_max' : r'$\kappa$(max)',
        'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
        'y_axis' : 'polarization angles',
    }
    plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)

    # Default labels for the wave numbers figure.
    aux = {
        'resonance' : 'eigenfrequencies',
        'masked' : 'masked eigenfrequencies',
        'eig_min' : r'wave number (min)',
        'eig_mid' : r'wave number (mid)',
        'eig_max' : r'wave number (max)',
        'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
        'y_axis' : 'wave numbers',
    }
    plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)

    # Default matplotlib line/fill styles and rc parameters.
    plot_rsc = {
        'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
        'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
        'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
        'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
                     'linestyle' : ':' },
        'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
                     'linestyle' : '--' },
        'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
                     'linestyle' : '-' },
        'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
        'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
        'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
        'params' : {'axes.labelsize': 'x-large',
                    'font.size': 14,
                    'legend.fontsize': 'large',
                    'legend.loc': 'best',
                    'xtick.labelsize': 'large',
                    'ytick.labelsize': 'large',
                    'text.usetex': True},
    }
    plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)

    return Struct(incident_wave_dir=get('incident_wave_dir', None),
                  plot_transform=get('plot_transform', None),
                  plot_transform_wave=get('plot_transform_wave', None),
                  plot_transform_angle=get('plot_transform_angle', None),
                  plot_options=get('plot_options', default_plot_options),
                  fig_name=get('fig_name', None),
                  fig_name_wave=get('fig_name_wave', None),
                  fig_name_angle=get('fig_name_angle', None),
                  fig_suffix=get('fig_suffix', '.pdf'),
                  plot_labels=plot_labels,
                  plot_labels_angle=plot_labels_angle,
                  plot_labels_wave=plot_labels_wave,
                  plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
    """
    Application options setup for phase velocity computation. Sets default
    values for missing non-compulsory options.
    """
    # `incident_wave_dir` is compulsory here - report a missing value.
    incident_wave_dir = options.get('incident_wave_dir', None,
                                    'missing "incident_wave_dir" in options!')
    return Struct(incident_wave_dir=incident_wave_dir)
def __init__(self, conf, options, output_prefix, **kwargs):
    """
    Initialize the application without creating equations, process the
    options and archive the input file in the output directory.
    """
    PDESolverApp.__init__(self, conf, options, output_prefix,
                          init_equations=False)

    self.setup_options()

    if conf._filename:
        # Keep a copy of the problem description file with the results.
        output_dir = self.problem.output_dir
        shutil.copyfile(conf._filename,
                        op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
    """
    Process the application options, choosing the phase-velocity variant
    of option processing when requested on the command line.
    """
    HomogenizationApp.setup_options(self)

    opts_fun = (AcousticBandGapsApp.process_options_pv
                if self.options.phase_velocity
                else AcousticBandGapsApp.process_options)
    self.app_options += opts_fun(self.conf.options)
def call(self):
    """
    Construct and call the homogenization engine according to options.

    Returns
    -------
    coefs : Coefficients
        The computed homogenized coefficients.
    """
    options = self.options

    opts = self.app_options
    conf = self.problem.conf
    coefs_name = opts.coefs
    coef_info = conf.get(opts.coefs, None,
                         'missing "%s" in problem description!'
                         % opts.coefs)

    if options.detect_band_gaps:
        # Compute band gaps coefficients and data.
        keys = [key for key in coef_info if key.startswith('band_gaps')]

    elif options.analyze_dispersion or options.phase_velocity:

        # Insert incident wave direction to coefficients that need it.
        for key, val in six.iteritems(coef_info):
            coef_opts = val.get('options', None)
            if coef_opts is None: continue

            if (('incident_wave_dir' in coef_opts)
                and (coef_opts['incident_wave_dir'] is None)):
                coef_opts['incident_wave_dir'] = opts.incident_wave_dir

        if options.analyze_dispersion:
            # Compute dispersion coefficients and data.
            keys = [key for key in coef_info
                    if key.startswith('dispersion')
                    or key.startswith('polarization_angles')]

        else:
            # Compute phase velocity and its requirements.
            keys = [key for key in coef_info
                    if key.startswith('phase_velocity')]

    else:
        # Compute only the eigenvalue problems via a dummy coefficient that
        # requires all 'evp*' requirements.
        names = [req for req in conf.get(opts.requirements, [''])
                 if req.startswith('evp')]
        coefs = {'dummy' : {'requires' : names,
                            'class' : CoefDummy,}}
        conf.coefs_dummy = coefs
        coefs_name = 'coefs_dummy'
        keys = ['dummy']

    he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
                        compute_only=keys,
                        post_process_hook=self.post_process_hook,
                        multiprocessing=False)

    # Volumes used to average the coefficients; default to unit volume.
    volumes = {}
    if hasattr(opts, 'volumes') and (opts.volumes is not None):
        volumes.update(opts.volumes)
    elif hasattr(opts, 'volume') and (opts.volume is not None):
        volumes['total'] = opts.volume
    else:
        volumes['total'] = 1.0

    he = HomogenizationEngine(self.problem, options,
                              app_options=he_options,
                              volumes=volumes)
    coefs = he()
    coefs = Coefficients(**coefs.to_dict())

    coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
    coefs.to_file_txt(coefs_filename + '.txt',
                      opts.tex_names,
                      opts.float_format)

    # Save optional per-coefficient band gap logs.
    bg_keys = [key for key in coefs.to_dict()
               if key.startswith('band_gaps')
               or key.startswith('dispersion')]
    for ii, key in enumerate(bg_keys):
        bg = coefs.get(key)
        log_save_name = bg.get('log_save_name', None)
        if log_save_name is not None:
            filename = op.join(self.problem.output_dir, log_save_name)
            bg.save_log(filename, opts.float_format, bg)

        raw_log_save_name = bg.get('raw_log_save_name', None)
        if raw_log_save_name is not None:
            filename = op.join(self.problem.output_dir, raw_log_save_name)
            save_raw_bg_logs(filename, bg.logs)

    if options.plot:
        if options.detect_band_gaps:
            self.plot_band_gaps(coefs)

        elif options.analyze_dispersion:
            self.plot_dispersion(coefs)

        elif options.phase_velocity:
            # No figure - just print the phase velocities.
            keys = [key for key in coefs.to_dict()
                    if key.startswith('phase_velocity')]
            for key in keys:
                output('%s:' % key, coefs.get(key))

    return coefs
def plot_band_gaps(self, coefs):
    """
    Plot all 'band_gaps*' coefficients in `coefs`, one figure per
    coefficient; optionally save and/or show the figures according to
    the application options.
    """
    opts = self.app_options

    bg_keys = [key for key in coefs.to_dict()
               if key.startswith('band_gaps')]

    plot_opts = opts.plot_options
    plot_rsc = opts.plot_rsc

    plt.rcParams.update(plot_rsc['params'])

    for ii, key in enumerate(bg_keys):
        bg = coefs.get(key)

        # Per-key labels override the common ones, if given.
        plot_labels = opts.plot_labels.get(key, opts.plot_labels)

        plot_range, teigs = transform_plot_data(bg.logs.eigs,
                                                opts.plot_transform,
                                                self.conf)
        # Draw the gap rectangles first, then the eigenvalue logs on top.
        fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
                        bg.freq_range_margins, plot_range,
                        clear=True)
        fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
                        bg.valid[bg.eig_range],
                        bg.freq_range_initial,
                        plot_range,
                        show_legend=plot_opts['legend'],
                        new_axes=True)

        plt.tight_layout()

        if opts.fig_name is not None:
            fig_name = _get_fig_name(self.problem.output_dir, opts.fig_name,
                                     key, 'band_gaps', opts.fig_suffix)
            fig.savefig(fig_name)

    if plot_opts['show']:
        plt.show()
def plot_dispersion(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('dispersion')]
plot_rsc = opts.plot_rsc
plot_opts = opts.plot_options
| plt.rcParams.update(plot_rsc['params']) | sfepy.base.plotutils.plt.rcParams.update |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
try:
values = getattr(obj, attr)
except:
values = defaults
else:
if recur and isinstance(values, dict):
for key, val in six.iteritems(values):
set_defaults(val, defaults)
else:
| set_defaults(values, defaults) | sfepy.base.base.set_defaults |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return `obj.attr` with missing items filled in from `defaults`; return
    `defaults` itself when the attribute does not exist.

    Parameters
    ----------
    obj : object
        Any object, typically an options Struct.
    attr : str
        The name of the attribute to look up.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values instead of to the value itself.

    Returns
    -------
    values : dict
        The attribute value updated with defaults, or `defaults`.
    """
    try:
        values = getattr(obj, attr)

    except AttributeError:
        # Only a missing attribute means "use the defaults" - do not mask
        # unrelated errors as a bare `except:` would.
        values = defaults

    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)

        else:
            set_defaults(values, defaults)

    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    Parameters
    ----------
    filename : str
        The name of the output file.
    logs : Struct
        The raw band gap logs with a `freqs` attribute (list of frequency
        arrays) and a `to_dict()` method.
    """
    out = {}

    # Cumulative offsets of the per-interval logs in the concatenated arrays.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)

    # nm.savez() writes binary data - the file must be opened in binary
    # mode ('wb'), not text mode ('w'), which fails on Python 3.
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data array and compute the
    common (min, max) plot range over the transformed data.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])

    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        tdata = data.copy()
        if fun is not None:
            tdata = fun(tdata, *plot_transform[1:])

        dmin = min(dmin, nm.nanmin(tdata))
        dmax = max(dmax, nm.nanmax(tdata))
        tdatas.append(tdata)

    # Guarantee a non-degenerate range even for constant data.
    dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)

    return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`:
    resonances : red, masked resonances : dotted red.
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    line_ok = line_masked = None
    for flag, freq in zip(valid, freq_range):
        xx = [freq, freq]
        if flag:
            line_ok = ax.plot(xx, plot_range, **plot_rsc['resonance'])[0]

        else:
            line_masked = ax.plot(xx, plot_range, **plot_rsc['masked'])[0]

    # Label only the last drawn line of each kind.
    if line_ok:
        line_ok.set_label(plot_labels['resonance'])
    if line_masked:
        line_masked.set_label(plot_labels['masked'])

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigs of a mass matrix.
    """
    if plt is None: return

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)

    lmax = lmin = lmid = None
    for xs, log in zip(freqs, logs):
        n_col = log.shape[1]
        lmax = ax.plot(xs, log[:, -1], **plot_rsc['eig_max'])
        lmin = (ax.plot(xs, log[:, 0], **plot_rsc['eig_min'])
                if n_col >= 2 else None)
        lmid = (ax.plot(xs, log[:, 1], **plot_rsc['eig_mid'])
                if n_col == 3 else None)

    # Label only the last drawn line of each kind.
    lmax[0].set_label(plot_labels['eig_max'])
    if lmin:
        lmin[0].set_label(plot_labels['eig_min'])
    if lmid:
        lmid[0].set_label(plot_labels['eig_mid'])

    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])

    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])

    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)

    if show_legend:
        ax.legend()

    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot single band gap frequency ranges as rectangles.
    """
    def draw_rect(ax, x, y, rsc):
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)

    # Colors.
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']

    # Map band gap kind to the ordered rectangle styles, one per range.
    styles = {
        'p' : (propagation,),
        'w' : (weak,),
        'wp' : (weak, propagation),
        's' : (strong,),
        'sw' : (strong, weak),
        'swp' : (strong, weak, propagation),
        'is' : (strong,),
        'iw' : (weak,),
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)

    for irange, rsc in zip(ranges, styles[kind]):
        draw_rect(ax, irange, plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
plot_range, show=False, clear=False, new_axes=False):
"""
Plot band gaps as rectangles.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
for ii in range(len(freq_range) - 1):
f0, f1 = freq_range[[ii, ii+1]]
gap = gaps[ii]
ranges = gap_ranges[ii]
if isinstance(gap, list):
for ig, (gmin, gmax) in enumerate(gap):
kind, kind_desc = kinds[ii][ig]
plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
output(' -> %s\n %s' %(kind_desc, ranges[ig]))
else:
gmin, gmax = gap
kind, kind_desc = kinds[ii]
plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc)
| output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1) | sfepy.base.base.output |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return `obj.attr` with missing items filled in from `defaults`; return
    `defaults` itself when the attribute does not exist.

    Parameters
    ----------
    obj : object
        Any object, typically an options Struct.
    attr : str
        The name of the attribute to look up.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values instead of to the value itself.

    Returns
    -------
    values : dict
        The attribute value updated with defaults, or `defaults`.
    """
    try:
        values = getattr(obj, attr)

    except AttributeError:
        # Only a missing attribute means "use the defaults" - do not mask
        # unrelated errors as a bare `except:` would.
        values = defaults

    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)

        else:
            set_defaults(values, defaults)

    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    Parameters
    ----------
    filename : str
        The name of the output file.
    logs : Struct
        The raw band gap logs with a `freqs` attribute (list of frequency
        arrays) and a `to_dict()` method.
    """
    out = {}

    # Cumulative offsets of the per-interval logs in the concatenated arrays.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)

    # nm.savez() writes binary data - the file must be opened in binary
    # mode ('wb'), not text mode ('w'), which fails on Python 3.
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data array and compute the
    common (min, max) plot range over the transformed data.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])

    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        tdata = data.copy()
        if fun is not None:
            tdata = fun(tdata, *plot_transform[1:])

        dmin = min(dmin, nm.nanmin(tdata))
        dmax = max(dmax, nm.nanmax(tdata))
        tdatas.append(tdata)

    # Guarantee a non-degenerate range even for constant data.
    dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)

    return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`:
    resonances : red, masked resonances : dotted red.
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    line_ok = line_masked = None
    for flag, freq in zip(valid, freq_range):
        xx = [freq, freq]
        if flag:
            line_ok = ax.plot(xx, plot_range, **plot_rsc['resonance'])[0]

        else:
            line_masked = ax.plot(xx, plot_range, **plot_rsc['masked'])[0]

    # Label only the last drawn line of each kind.
    if line_ok:
        line_ok.set_label(plot_labels['resonance'])
    if line_masked:
        line_masked.set_label(plot_labels['masked'])

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigs of a mass matrix.
    """
    if plt is None: return

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)

    lmax = lmin = lmid = None
    for xs, log in zip(freqs, logs):
        n_col = log.shape[1]
        lmax = ax.plot(xs, log[:, -1], **plot_rsc['eig_max'])
        lmin = (ax.plot(xs, log[:, 0], **plot_rsc['eig_min'])
                if n_col >= 2 else None)
        lmid = (ax.plot(xs, log[:, 1], **plot_rsc['eig_mid'])
                if n_col == 3 else None)

    # Label only the last drawn line of each kind.
    lmax[0].set_label(plot_labels['eig_max'])
    if lmin:
        lmin[0].set_label(plot_labels['eig_min'])
    if lmid:
        lmid[0].set_label(plot_labels['eig_mid'])

    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])

    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])

    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)

    if show_legend:
        ax.legend()

    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot single band gap frequency ranges as rectangles.
    """
    def draw_rect(ax, x, y, rsc):
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)

    # Colors.
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']

    # Map band gap kind to the ordered rectangle styles, one per range.
    styles = {
        'p' : (propagation,),
        'w' : (weak,),
        'wp' : (weak, propagation),
        's' : (strong,),
        'sw' : (strong, weak),
        'swp' : (strong, weak, propagation),
        'is' : (strong,),
        'iw' : (weak,),
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)

    for irange, rsc in zip(ranges, styles[kind]):
        draw_rect(ax, irange, plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
plot_range, show=False, clear=False, new_axes=False):
"""
Plot band gaps as rectangles.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
for ii in range(len(freq_range) - 1):
f0, f1 = freq_range[[ii, ii+1]]
gap = gaps[ii]
ranges = gap_ranges[ii]
if isinstance(gap, list):
for ig, (gmin, gmax) in enumerate(gap):
kind, kind_desc = kinds[ii][ig]
plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
output(' -> %s\n %s' %(kind_desc, ranges[ig]))
else:
gmin, gmax = gap
kind, kind_desc = kinds[ii]
plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
| output(' -> %s\n %s' %(kind_desc, ranges)) | sfepy.base.base.output |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return `obj.attr` with missing items filled in from `defaults`; return
    `defaults` itself when the attribute does not exist.

    Parameters
    ----------
    obj : object
        Any object, typically an options Struct.
    attr : str
        The name of the attribute to look up.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values instead of to the value itself.

    Returns
    -------
    values : dict
        The attribute value updated with defaults, or `defaults`.
    """
    try:
        values = getattr(obj, attr)

    except AttributeError:
        # Only a missing attribute means "use the defaults" - do not mask
        # unrelated errors as a bare `except:` would.
        values = defaults

    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)

        else:
            set_defaults(values, defaults)

    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    Parameters
    ----------
    filename : str
        The name of the output file.
    logs : Struct
        The raw band gap logs with a `freqs` attribute (list of frequency
        arrays) and a `to_dict()` method.
    """
    out = {}

    # Cumulative offsets of the per-interval logs in the concatenated arrays.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)

    # nm.savez() writes binary data - the file must be opened in binary
    # mode ('wb'), not text mode ('w'), which fails on Python 3.
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data array and compute the
    common (min, max) plot range over the transformed data.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])

    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        tdata = data.copy()
        if fun is not None:
            tdata = fun(tdata, *plot_transform[1:])

        dmin = min(dmin, nm.nanmin(tdata))
        dmax = max(dmax, nm.nanmax(tdata))
        tdatas.append(tdata)

    # Guarantee a non-degenerate range even for constant data.
    dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)

    return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`:
    resonances : red, masked resonances : dotted red.
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    line_ok = line_masked = None
    for flag, freq in zip(valid, freq_range):
        xx = [freq, freq]
        if flag:
            line_ok = ax.plot(xx, plot_range, **plot_rsc['resonance'])[0]

        else:
            line_masked = ax.plot(xx, plot_range, **plot_rsc['masked'])[0]

    # Label only the last drawn line of each kind.
    if line_ok:
        line_ok.set_label(plot_labels['resonance'])
    if line_masked:
        line_masked.set_label(plot_labels['masked'])

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigs of a mass matrix.
    """
    if plt is None: return

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)

    lmax = lmin = lmid = None
    for xs, log in zip(freqs, logs):
        n_col = log.shape[1]
        lmax = ax.plot(xs, log[:, -1], **plot_rsc['eig_max'])
        lmin = (ax.plot(xs, log[:, 0], **plot_rsc['eig_min'])
                if n_col >= 2 else None)
        lmid = (ax.plot(xs, log[:, 1], **plot_rsc['eig_mid'])
                if n_col == 3 else None)

    # Label only the last drawn line of each kind.
    lmax[0].set_label(plot_labels['eig_max'])
    if lmin:
        lmin[0].set_label(plot_labels['eig_min'])
    if lmid:
        lmid[0].set_label(plot_labels['eig_mid'])

    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])

    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])

    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)

    if show_legend:
        ax.legend()

    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot single band gap frequency ranges as rectangles.
    """
    def draw_rect(ax, x, y, rsc):
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)

    # Colors.
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']

    # Map band gap kind to the ordered rectangle styles, one per range.
    styles = {
        'p' : (propagation,),
        'w' : (weak,),
        'wp' : (weak, propagation),
        's' : (strong,),
        'sw' : (strong, weak),
        'swp' : (strong, weak, propagation),
        'is' : (strong,),
        'iw' : (weak,),
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)

    for irange, rsc in zip(ranges, styles[kind]):
        draw_rect(ax, irange, plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
              plot_range, show=False, clear=False, new_axes=False):
    """
    Plot band gaps as rectangles.
    """
    if plt is None: return

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()

    ax = fig.add_subplot(111) if new_axes else fig.gca()

    for ii in range(len(freq_range) - 1):
        f0, f1 = freq_range[[ii, ii+1]]
        gap, ranges = gaps[ii], gap_ranges[ii]

        # Normalize the single-gap case to a list of (range, kind, gap)
        # triples so that both cases share one drawing loop.
        if isinstance(gap, list):
            items = list(zip(ranges, kinds[ii], gap))

        else:
            items = [(ranges, kinds[ii], gap)]

        for rng, (kind, kind_desc), (gmin, gmax) in items:
            plot_gap(ax, rng, kind, kind_desc, plot_range, plot_rsc)

            output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
            output(' -> %s\n %s' %(kind_desc, rng))

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
    """
    Construct the complete name of figure file.
    """
    # The part of `key` beyond the common prefix distinguishes the figures.
    suffix = key.replace(common, '')
    if suffix and not suffix.startswith('_'):
        suffix = '_' + suffix

    return op.join(output_dir, fig_name + suffix + fig_suffix)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
    @staticmethod
    def process_options(options):
        """
        Application options setup. Sets default values for missing
        non-compulsory options.

        Returns a Struct with the plotting-related options: figure
        names and suffix, data transforms, labels per figure kind and
        Matplotlib drawing resources.
        """
        get = options.get
        # Plot window behaviour used when no 'plot_options' is given.
        default_plot_options = {'show' : True,'legend' : False,}
        # Default labels for the band gaps figure (eigenvalues of the
        # homogenized mass matrix M*).
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'min eig($M^*$)',
            'eig_mid' : r'mid eig($M^*$)',
            'eig_max' : r'max eig($M^*$)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : r'eigenvalues of mass matrix $M^*$',
        }
        plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)
        # Default labels for the polarization angles figure.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'$\kappa$(min)',
            'eig_mid' : r'$\kappa$(mid)',
            'eig_max' : r'$\kappa$(max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'polarization angles',
        }
        plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)
        # Default labels for the wave numbers (dispersion) figure.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'wave number (min)',
            'eig_mid' : r'wave number (mid)',
            'eig_max' : r'wave number (max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'wave numbers',
        }
        plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)
        # Default Matplotlib resources: line styles per curve kind,
        # rectangle fills per gap kind and global rcParams ('params').
        plot_rsc = {
            'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
            'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
            'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
            'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
                         'linestyle' : ':' },
            'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
                         'linestyle' : '--' },
            'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
                         'linestyle' : '-' },
            'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
            'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
            'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
            'params' : {'axes.labelsize': 'x-large',
                        'font.size': 14,
                        'legend.fontsize': 'large',
                        'legend.loc': 'best',
                        'xtick.labelsize': 'large',
                        'ytick.labelsize': 'large',
                        'text.usetex': True},
        }
        plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)
        # Collect all application options - user-given values take
        # precedence, the defaults built above fill the gaps.
        return Struct(incident_wave_dir=get('incident_wave_dir', None),
                      plot_transform=get('plot_transform', None),
                      plot_transform_wave=get('plot_transform_wave', None),
                      plot_transform_angle=get('plot_transform_angle', None),
                      plot_options=get('plot_options', default_plot_options),
                      fig_name=get('fig_name', None),
                      fig_name_wave=get('fig_name_wave', None),
                      fig_name_angle=get('fig_name_angle', None),
                      fig_suffix=get('fig_suffix', '.pdf'),
                      plot_labels=plot_labels,
                      plot_labels_angle=plot_labels_angle,
                      plot_labels_wave=plot_labels_wave,
                      plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
"""
Application options setup for phase velocity computation. Sets default
values for missing non-compulsory options.
"""
get = options.get
incident_wave_dir=get('incident_wave_dir', None,
'missing "incident_wave_dir" in options!')
return Struct(incident_wave_dir=incident_wave_dir)
    def __init__(self, conf, options, output_prefix, **kwargs):
        """
        Initialize the application: set up the underlying problem
        without equations and process the application options.
        """
        PDESolverApp.__init__(self, conf, options, output_prefix,
                              init_equations=False)
        self.setup_options()
        # Copy the problem description file into the output directory
        # for reproducibility of the results.
        if conf._filename:
            output_dir = self.problem.output_dir
            shutil.copyfile(conf._filename,
                            op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
HomogenizationApp.setup_options(self)
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options(self.conf.options)
    def call(self):
        """
        Construct and call the homogenization engine according to options.

        Depending on the command line options, compute band gaps,
        dispersion (with polarization angles), phase velocity, or only
        the eigenvalue problems; save the resulting coefficients and
        logs, and optionally plot them.

        Returns
        -------
        coefs : Coefficients
            The computed coefficients.
        """
        options = self.options
        opts = self.app_options
        conf = self.problem.conf
        coefs_name = opts.coefs
        coef_info = conf.get(opts.coefs, None,
                             'missing "%s" in problem description!'
                             % opts.coefs)
        # Select which coefficients to compute according to the mode
        # given by the command line options.
        if options.detect_band_gaps:
            # Compute band gaps coefficients and data.
            keys = [key for key in coef_info if key.startswith('band_gaps')]
        elif options.analyze_dispersion or options.phase_velocity:
            # Insert incident wave direction to coefficients that need it.
            for key, val in six.iteritems(coef_info):
                coef_opts = val.get('options', None)
                if coef_opts is None: continue
                if (('incident_wave_dir' in coef_opts)
                    and (coef_opts['incident_wave_dir'] is None)):
                    coef_opts['incident_wave_dir'] = opts.incident_wave_dir
            if options.analyze_dispersion:
                # Compute dispersion coefficients and data.
                keys = [key for key in coef_info
                        if key.startswith('dispersion')
                        or key.startswith('polarization_angles')]
            else:
                # Compute phase velocity and its requirements.
                keys = [key for key in coef_info
                        if key.startswith('phase_velocity')]
        else:
            # Compute only the eigenvalue problems.
            names = [req for req in conf.get(opts.requirements, [''])
                     if req.startswith('evp')]
            coefs = {'dummy' : {'requires' : names,
                                'class' : CoefDummy,}}
            conf.coefs_dummy = coefs
            coefs_name = 'coefs_dummy'
            keys = ['dummy']
        he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
                            compute_only=keys,
                            post_process_hook=self.post_process_hook,
                            multiprocessing=False)
        # The volumes used for coefficient averaging: either given
        # explicitly, or a single total volume (1.0 by default).
        volumes = {}
        if hasattr(opts, 'volumes') and (opts.volumes is not None):
            volumes.update(opts.volumes)
        elif hasattr(opts, 'volume') and (opts.volume is not None):
            volumes['total'] = opts.volume
        else:
            volumes['total'] = 1.0
        he = HomogenizationEngine(self.problem, options,
                                  app_options=he_options,
                                  volumes=volumes)
        coefs = he()
        coefs = Coefficients(**coefs.to_dict())
        # Save the coefficients in a text form.
        coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
        coefs.to_file_txt(coefs_filename + '.txt',
                          opts.tex_names,
                          opts.float_format)
        # Save the band gaps/dispersion logs, if their file names are
        # set in the corresponding coefficients.
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('band_gaps')
                   or key.startswith('dispersion')]
        for ii, key in enumerate(bg_keys):
            bg = coefs.get(key)
            log_save_name = bg.get('log_save_name', None)
            if log_save_name is not None:
                filename = op.join(self.problem.output_dir, log_save_name)
                bg.save_log(filename, opts.float_format, bg)
            raw_log_save_name = bg.get('raw_log_save_name', None)
            if raw_log_save_name is not None:
                filename = op.join(self.problem.output_dir, raw_log_save_name)
                save_raw_bg_logs(filename, bg.logs)
        if options.plot:
            if options.detect_band_gaps:
                self.plot_band_gaps(coefs)
            elif options.analyze_dispersion:
                self.plot_dispersion(coefs)
            elif options.phase_velocity:
                keys = [key for key in coefs.to_dict()
                        if key.startswith('phase_velocity')]
                for key in keys:
                    output('%s:' % key, coefs.get(key))
        return coefs
def plot_band_gaps(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('band_gaps')]
plot_opts = opts.plot_options
plot_rsc = opts.plot_rsc
plt.rcParams.update(plot_rsc['params'])
for ii, key in enumerate(bg_keys):
bg = coefs.get(key)
plot_labels = opts.plot_labels.get(key, opts.plot_labels)
plot_range, teigs = transform_plot_data(bg.logs.eigs,
opts.plot_transform,
self.conf)
fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
| plt.tight_layout() | sfepy.base.plotutils.plt.tight_layout |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return ``getattr(obj, attr)`` updated with default values.

    Parameters
    ----------
    obj : object
        The object possibly having the attribute `attr`.
    attr : str
        The attribute name. If missing, `defaults` is returned as is.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dictionary, apply the
        defaults to each of its values instead of the value itself.

    Returns
    -------
    values : dict
        The attribute value with defaults filled in, or `defaults`.
    """
    try:
        values = getattr(obj, attr)
    except AttributeError:
        # Only a missing attribute means "use the defaults" - do not
        # mask unrelated errors (the original bare except did).
        values = defaults
    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)
        else:
            set_defaults(values, defaults)
    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    The file is a NumPy .npz archive with one concatenated array per log
    key, plus the 'iranges' array holding the offsets of the individual
    frequency ranges within the concatenated arrays.
    """
    out = {}
    # Offsets of the individual frequency ranges in the concatenated logs.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)
    # savez() writes binary data - the file must be opened in binary
    # mode ('w' fails on Python 3).
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data set and determine the
    common plotting range.

    Returns ``((dmin, dmax), tdatas)`` - the range covering all the
    (transformed) data and the list of transformed copies.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])
    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        item = data.copy()
        if plot_transform is not None:
            item = fun(item, *plot_transform[1:])
        tdatas.append(item)
        dmin = min(dmin, nm.nanmin(item))
        dmax = max(dmax, nm.nanmax(item))
    # Ensure a non-degenerate range.
    dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)
    return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`: valid frequencies are drawn
    with the 'resonance' style (red), masked ones with the 'masked'
    style (dotted red).
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()
    # Remember the last drawn line of each kind for labeling.
    last = {True : None, False : None}
    styles = {True : plot_rsc['resonance'], False : plot_rsc['masked']}
    for flag, freq in zip(valid, freq_range):
        flag = bool(flag)
        last[flag] = ax.plot([freq, freq], plot_range, **styles[flag])[0]
    if last[True]:
        last[True].set_label(plot_labels['resonance'])
    if last[False]:
        last[False].set_label(plot_labels['masked'])
    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)
    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigenvalues of a mass matrix.

    Each log is an array with 1-3 columns: the last column holds the
    maximum, the first the minimum and the middle one (if present) the
    middle eigenvalue. Resonance frequencies are drawn as well when
    `draw_eigs` is set.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()
    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)
    for ifr, log in enumerate(logs):
        n_cols = log.shape[1]
        lmax = ax.plot(freqs[ifr], log[:, -1], **plot_rsc['eig_max'])
        lmin = (ax.plot(freqs[ifr], log[:, 0], **plot_rsc['eig_min'])
                if n_cols >= 2 else None)
        lmid = (ax.plot(freqs[ifr], log[:, 1], **plot_rsc['eig_mid'])
                if n_cols == 3 else None)
    # Label only the curves of the last frequency range.
    lmax[0].set_label(plot_labels['eig_max'])
    if lmin:
        lmin[0].set_label(plot_labels['eig_min'])
    if lmid:
        lmid[0].set_label(plot_labels['eig_mid'])
    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])
    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)
    if show_legend:
        ax.legend()
    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot a single band gap as filled rectangles.

    The gap `kind` determines which items of `ranges` are drawn with
    which fill style (strong gap, weak gap, or propagation zone).
    """
    def fill_rect(x, y, rsc):
        # Rectangle corners in drawing order.
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']
    # Fill styles of the consecutive sub-ranges for each gap kind.
    styles = {
        'p' : (propagation,),
        'w' : (weak,),
        'wp' : (weak, propagation),
        's' : (strong,),
        'sw' : (strong, weak),
        'swp' : (strong, weak, propagation),
        'is' : (strong,),
        'iw' : (weak,),
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)
    for rng, rsc in zip(ranges, styles[kind]):
        fill_rect(rng, plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
              plot_range, show=False, clear=False, new_axes=False):
    """
    Plot the band gaps of all frequency intervals as filled rectangles
    and report them via :func:`output()`.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()
    for ii in range(len(freq_range) - 1):
        f0, f1 = freq_range[[ii, ii+1]]
        gap = gaps[ii]
        ranges = gap_ranges[ii]
        if isinstance(gap, list):
            # Several gaps within a single frequency interval.
            items = list(zip(ranges, kinds[ii], gap))
        else:
            items = [(ranges, kinds[ii], gap)]
        for rng, (kind, kind_desc), (gmin, gmax) in items:
            plot_gap(ax, rng, kind, kind_desc, plot_range, plot_rsc)
            output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
            output(' -> %s\n %s' %(kind_desc, rng))
    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)
    if show:
        plt.show()
    return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
    """
    Build the full path of a figure file.

    The file name is `fig_name` extended by the part of `key` differing
    from `common` (with a '_' separator if needed) and `fig_suffix`.
    """
    extra = key.replace(common, '')
    if extra and not extra.startswith('_'):
        extra = '_' + extra
    return op.join(output_dir, fig_name + extra + fig_suffix)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
    @staticmethod
    def process_options(options):
        """
        Application options setup. Sets default values for missing
        non-compulsory options.

        Returns a Struct with the plotting-related options: figure
        names and suffix, data transforms, labels per figure kind and
        Matplotlib drawing resources.
        """
        get = options.get
        # Plot window behaviour used when no 'plot_options' is given.
        default_plot_options = {'show' : True,'legend' : False,}
        # Default labels for the band gaps figure (eigenvalues of the
        # homogenized mass matrix M*).
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'min eig($M^*$)',
            'eig_mid' : r'mid eig($M^*$)',
            'eig_max' : r'max eig($M^*$)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : r'eigenvalues of mass matrix $M^*$',
        }
        plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)
        # Default labels for the polarization angles figure.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'$\kappa$(min)',
            'eig_mid' : r'$\kappa$(mid)',
            'eig_max' : r'$\kappa$(max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'polarization angles',
        }
        plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)
        # Default labels for the wave numbers (dispersion) figure.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'wave number (min)',
            'eig_mid' : r'wave number (mid)',
            'eig_max' : r'wave number (max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'wave numbers',
        }
        plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)
        # Default Matplotlib resources: line styles per curve kind,
        # rectangle fills per gap kind and global rcParams ('params').
        plot_rsc = {
            'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
            'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
            'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
            'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
                         'linestyle' : ':' },
            'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
                         'linestyle' : '--' },
            'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
                         'linestyle' : '-' },
            'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
            'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
            'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
            'params' : {'axes.labelsize': 'x-large',
                        'font.size': 14,
                        'legend.fontsize': 'large',
                        'legend.loc': 'best',
                        'xtick.labelsize': 'large',
                        'ytick.labelsize': 'large',
                        'text.usetex': True},
        }
        plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)
        # Collect all application options - user-given values take
        # precedence, the defaults built above fill the gaps.
        return Struct(incident_wave_dir=get('incident_wave_dir', None),
                      plot_transform=get('plot_transform', None),
                      plot_transform_wave=get('plot_transform_wave', None),
                      plot_transform_angle=get('plot_transform_angle', None),
                      plot_options=get('plot_options', default_plot_options),
                      fig_name=get('fig_name', None),
                      fig_name_wave=get('fig_name_wave', None),
                      fig_name_angle=get('fig_name_angle', None),
                      fig_suffix=get('fig_suffix', '.pdf'),
                      plot_labels=plot_labels,
                      plot_labels_angle=plot_labels_angle,
                      plot_labels_wave=plot_labels_wave,
                      plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
"""
Application options setup for phase velocity computation. Sets default
values for missing non-compulsory options.
"""
get = options.get
incident_wave_dir=get('incident_wave_dir', None,
'missing "incident_wave_dir" in options!')
return Struct(incident_wave_dir=incident_wave_dir)
    def __init__(self, conf, options, output_prefix, **kwargs):
        """
        Initialize the application: set up the underlying problem
        without equations and process the application options.
        """
        PDESolverApp.__init__(self, conf, options, output_prefix,
                              init_equations=False)
        self.setup_options()
        # Copy the problem description file into the output directory
        # for reproducibility of the results.
        if conf._filename:
            output_dir = self.problem.output_dir
            shutil.copyfile(conf._filename,
                            op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
HomogenizationApp.setup_options(self)
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options(self.conf.options)
    def call(self):
        """
        Construct and call the homogenization engine according to options.

        Depending on the command line options, compute band gaps,
        dispersion (with polarization angles), phase velocity, or only
        the eigenvalue problems; save the resulting coefficients and
        logs, and optionally plot them.

        Returns
        -------
        coefs : Coefficients
            The computed coefficients.
        """
        options = self.options
        opts = self.app_options
        conf = self.problem.conf
        coefs_name = opts.coefs
        coef_info = conf.get(opts.coefs, None,
                             'missing "%s" in problem description!'
                             % opts.coefs)
        # Select which coefficients to compute according to the mode
        # given by the command line options.
        if options.detect_band_gaps:
            # Compute band gaps coefficients and data.
            keys = [key for key in coef_info if key.startswith('band_gaps')]
        elif options.analyze_dispersion or options.phase_velocity:
            # Insert incident wave direction to coefficients that need it.
            for key, val in six.iteritems(coef_info):
                coef_opts = val.get('options', None)
                if coef_opts is None: continue
                if (('incident_wave_dir' in coef_opts)
                    and (coef_opts['incident_wave_dir'] is None)):
                    coef_opts['incident_wave_dir'] = opts.incident_wave_dir
            if options.analyze_dispersion:
                # Compute dispersion coefficients and data.
                keys = [key for key in coef_info
                        if key.startswith('dispersion')
                        or key.startswith('polarization_angles')]
            else:
                # Compute phase velocity and its requirements.
                keys = [key for key in coef_info
                        if key.startswith('phase_velocity')]
        else:
            # Compute only the eigenvalue problems.
            names = [req for req in conf.get(opts.requirements, [''])
                     if req.startswith('evp')]
            coefs = {'dummy' : {'requires' : names,
                                'class' : CoefDummy,}}
            conf.coefs_dummy = coefs
            coefs_name = 'coefs_dummy'
            keys = ['dummy']
        he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
                            compute_only=keys,
                            post_process_hook=self.post_process_hook,
                            multiprocessing=False)
        # The volumes used for coefficient averaging: either given
        # explicitly, or a single total volume (1.0 by default).
        volumes = {}
        if hasattr(opts, 'volumes') and (opts.volumes is not None):
            volumes.update(opts.volumes)
        elif hasattr(opts, 'volume') and (opts.volume is not None):
            volumes['total'] = opts.volume
        else:
            volumes['total'] = 1.0
        he = HomogenizationEngine(self.problem, options,
                                  app_options=he_options,
                                  volumes=volumes)
        coefs = he()
        coefs = Coefficients(**coefs.to_dict())
        # Save the coefficients in a text form.
        coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
        coefs.to_file_txt(coefs_filename + '.txt',
                          opts.tex_names,
                          opts.float_format)
        # Save the band gaps/dispersion logs, if their file names are
        # set in the corresponding coefficients.
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('band_gaps')
                   or key.startswith('dispersion')]
        for ii, key in enumerate(bg_keys):
            bg = coefs.get(key)
            log_save_name = bg.get('log_save_name', None)
            if log_save_name is not None:
                filename = op.join(self.problem.output_dir, log_save_name)
                bg.save_log(filename, opts.float_format, bg)
            raw_log_save_name = bg.get('raw_log_save_name', None)
            if raw_log_save_name is not None:
                filename = op.join(self.problem.output_dir, raw_log_save_name)
                save_raw_bg_logs(filename, bg.logs)
        if options.plot:
            if options.detect_band_gaps:
                self.plot_band_gaps(coefs)
            elif options.analyze_dispersion:
                self.plot_dispersion(coefs)
            elif options.phase_velocity:
                keys = [key for key in coefs.to_dict()
                        if key.startswith('phase_velocity')]
                for key in keys:
                    output('%s:' % key, coefs.get(key))
        return coefs
def plot_band_gaps(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('band_gaps')]
plot_opts = opts.plot_options
plot_rsc = opts.plot_rsc
plt.rcParams.update(plot_rsc['params'])
for ii, key in enumerate(bg_keys):
bg = coefs.get(key)
plot_labels = opts.plot_labels.get(key, opts.plot_labels)
plot_range, teigs = transform_plot_data(bg.logs.eigs,
opts.plot_transform,
self.conf)
fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
plt.tight_layout()
if opts.fig_name is not None:
fig_name = _get_fig_name(self.problem.output_dir, opts.fig_name,
key, 'band_gaps', opts.fig_suffix)
fig.savefig(fig_name)
if plot_opts['show']:
| plt.show() | sfepy.base.plotutils.plt.show |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return ``getattr(obj, attr)`` updated with default values.

    Parameters
    ----------
    obj : object
        The object possibly having the attribute `attr`.
    attr : str
        The attribute name. If missing, `defaults` is returned as is.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dictionary, apply the
        defaults to each of its values instead of the value itself.

    Returns
    -------
    values : dict
        The attribute value with defaults filled in, or `defaults`.
    """
    try:
        values = getattr(obj, attr)
    except AttributeError:
        # Only a missing attribute means "use the defaults" - do not
        # mask unrelated errors (the original bare except did).
        values = defaults
    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)
        else:
            set_defaults(values, defaults)
    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    The file is a NumPy .npz archive with one concatenated array per log
    key, plus the 'iranges' array holding the offsets of the individual
    frequency ranges within the concatenated arrays.
    """
    out = {}
    # Offsets of the individual frequency ranges in the concatenated logs.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)
    # savez() writes binary data - the file must be opened in binary
    # mode ('w' fails on Python 3).
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data set and determine the
    common plotting range.

    Returns ``((dmin, dmax), tdatas)`` - the range covering all the
    (transformed) data and the list of transformed copies.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])
    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        item = data.copy()
        if plot_transform is not None:
            item = fun(item, *plot_transform[1:])
        tdatas.append(item)
        dmin = min(dmin, nm.nanmin(item))
        dmax = max(dmax, nm.nanmax(item))
    # Ensure a non-degenerate range.
    dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)
    return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`: valid frequencies are drawn
    with the 'resonance' style (red), masked ones with the 'masked'
    style (dotted red).
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()
    # Remember the last drawn line of each kind for labeling.
    last = {True : None, False : None}
    styles = {True : plot_rsc['resonance'], False : plot_rsc['masked']}
    for flag, freq in zip(valid, freq_range):
        flag = bool(flag)
        last[flag] = ax.plot([freq, freq], plot_range, **styles[flag])[0]
    if last[True]:
        last[True].set_label(plot_labels['resonance'])
    if last[False]:
        last[False].set_label(plot_labels['masked'])
    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)
    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigenvalues of a mass matrix.

    Each log is an array with 1-3 columns: the last column holds the
    maximum, the first the minimum and the middle one (if present) the
    middle eigenvalue. Resonance frequencies are drawn as well when
    `draw_eigs` is set.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()
    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)
    for ifr, log in enumerate(logs):
        n_cols = log.shape[1]
        lmax = ax.plot(freqs[ifr], log[:, -1], **plot_rsc['eig_max'])
        lmin = (ax.plot(freqs[ifr], log[:, 0], **plot_rsc['eig_min'])
                if n_cols >= 2 else None)
        lmid = (ax.plot(freqs[ifr], log[:, 1], **plot_rsc['eig_mid'])
                if n_cols == 3 else None)
    # Label only the curves of the last frequency range.
    lmax[0].set_label(plot_labels['eig_max'])
    if lmin:
        lmin[0].set_label(plot_labels['eig_min'])
    if lmid:
        lmid[0].set_label(plot_labels['eig_mid'])
    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])
    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)
    if show_legend:
        ax.legend()
    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot a single band gap as filled rectangles.

    The gap `kind` determines which items of `ranges` are drawn with
    which fill style (strong gap, weak gap, or propagation zone).
    """
    def fill_rect(x, y, rsc):
        # Rectangle corners in drawing order.
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']
    # Fill styles of the consecutive sub-ranges for each gap kind.
    styles = {
        'p' : (propagation,),
        'w' : (weak,),
        'wp' : (weak, propagation),
        's' : (strong,),
        'sw' : (strong, weak),
        'swp' : (strong, weak, propagation),
        'is' : (strong,),
        'iw' : (weak,),
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)
    for rng, rsc in zip(ranges, styles[kind]):
        fill_rect(rng, plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
              plot_range, show=False, clear=False, new_axes=False):
    """
    Plot the band gaps of all frequency intervals as filled rectangles
    and report them via :func:`output()`.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()
    for ii in range(len(freq_range) - 1):
        f0, f1 = freq_range[[ii, ii+1]]
        gap = gaps[ii]
        ranges = gap_ranges[ii]
        if isinstance(gap, list):
            # Several gaps within a single frequency interval.
            items = list(zip(ranges, kinds[ii], gap))
        else:
            items = [(ranges, kinds[ii], gap)]
        for rng, (kind, kind_desc), (gmin, gmax) in items:
            plot_gap(ax, rng, kind, kind_desc, plot_range, plot_rsc)
            output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
            output(' -> %s\n %s' %(kind_desc, rng))
    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)
    if show:
        plt.show()
    return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
    """
    Build the full path of a figure file.

    The file name is `fig_name` extended by the part of `key` differing
    from `common` (with a '_' separator if needed) and `fig_suffix`.
    """
    extra = key.replace(common, '')
    if extra and not extra.startswith('_'):
        extra = '_' + extra
    return op.join(output_dir, fig_name + extra + fig_suffix)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
    @staticmethod
    def process_options(options):
        """
        Application options setup. Sets default values for missing
        non-compulsory options.

        Returns a Struct with the plotting-related options: figure
        names and suffix, data transforms, labels per figure kind and
        Matplotlib drawing resources.
        """
        get = options.get
        # Plot window behaviour used when no 'plot_options' is given.
        default_plot_options = {'show' : True,'legend' : False,}
        # Default labels for the band gaps figure (eigenvalues of the
        # homogenized mass matrix M*).
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'min eig($M^*$)',
            'eig_mid' : r'mid eig($M^*$)',
            'eig_max' : r'max eig($M^*$)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : r'eigenvalues of mass matrix $M^*$',
        }
        plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)
        # Default labels for the polarization angles figure.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'$\kappa$(min)',
            'eig_mid' : r'$\kappa$(mid)',
            'eig_max' : r'$\kappa$(max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'polarization angles',
        }
        plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)
        # Default labels for the wave numbers (dispersion) figure.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'wave number (min)',
            'eig_mid' : r'wave number (mid)',
            'eig_max' : r'wave number (max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'wave numbers',
        }
        plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)
        # Default Matplotlib resources: line styles per curve kind,
        # rectangle fills per gap kind and global rcParams ('params').
        plot_rsc = {
            'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
            'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
            'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
            'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
                         'linestyle' : ':' },
            'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
                         'linestyle' : '--' },
            'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
                         'linestyle' : '-' },
            'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
            'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
            'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
            'params' : {'axes.labelsize': 'x-large',
                        'font.size': 14,
                        'legend.fontsize': 'large',
                        'legend.loc': 'best',
                        'xtick.labelsize': 'large',
                        'ytick.labelsize': 'large',
                        'text.usetex': True},
        }
        plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)
        # Collect all application options - user-given values take
        # precedence, the defaults built above fill the gaps.
        return Struct(incident_wave_dir=get('incident_wave_dir', None),
                      plot_transform=get('plot_transform', None),
                      plot_transform_wave=get('plot_transform_wave', None),
                      plot_transform_angle=get('plot_transform_angle', None),
                      plot_options=get('plot_options', default_plot_options),
                      fig_name=get('fig_name', None),
                      fig_name_wave=get('fig_name_wave', None),
                      fig_name_angle=get('fig_name_angle', None),
                      fig_suffix=get('fig_suffix', '.pdf'),
                      plot_labels=plot_labels,
                      plot_labels_angle=plot_labels_angle,
                      plot_labels_wave=plot_labels_wave,
                      plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
"""
Application options setup for phase velocity computation. Sets default
values for missing non-compulsory options.
"""
get = options.get
incident_wave_dir=get('incident_wave_dir', None,
'missing "incident_wave_dir" in options!')
return Struct(incident_wave_dir=incident_wave_dir)
    def __init__(self, conf, options, output_prefix, **kwargs):
        """
        Initialize the application: set up the underlying problem
        without equations and process the application options.
        """
        PDESolverApp.__init__(self, conf, options, output_prefix,
                              init_equations=False)
        self.setup_options()
        # Copy the problem description file into the output directory
        # for reproducibility of the results.
        if conf._filename:
            output_dir = self.problem.output_dir
            shutil.copyfile(conf._filename,
                            op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
HomogenizationApp.setup_options(self)
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options(self.conf.options)
    def call(self):
        """
        Construct and call the homogenization engine according to options.

        Depending on the command line options, compute band gaps,
        dispersion (with polarization angles), phase velocity, or only
        the eigenvalue problems; save the resulting coefficients and
        logs, and optionally plot them.

        Returns
        -------
        coefs : Coefficients
            The computed coefficients.
        """
        options = self.options
        opts = self.app_options
        conf = self.problem.conf
        coefs_name = opts.coefs
        coef_info = conf.get(opts.coefs, None,
                             'missing "%s" in problem description!'
                             % opts.coefs)
        # Select which coefficients to compute according to the mode
        # given by the command line options.
        if options.detect_band_gaps:
            # Compute band gaps coefficients and data.
            keys = [key for key in coef_info if key.startswith('band_gaps')]
        elif options.analyze_dispersion or options.phase_velocity:
            # Insert incident wave direction to coefficients that need it.
            for key, val in six.iteritems(coef_info):
                coef_opts = val.get('options', None)
                if coef_opts is None: continue
                if (('incident_wave_dir' in coef_opts)
                    and (coef_opts['incident_wave_dir'] is None)):
                    coef_opts['incident_wave_dir'] = opts.incident_wave_dir
            if options.analyze_dispersion:
                # Compute dispersion coefficients and data.
                keys = [key for key in coef_info
                        if key.startswith('dispersion')
                        or key.startswith('polarization_angles')]
            else:
                # Compute phase velocity and its requirements.
                keys = [key for key in coef_info
                        if key.startswith('phase_velocity')]
        else:
            # Compute only the eigenvalue problems.
            names = [req for req in conf.get(opts.requirements, [''])
                     if req.startswith('evp')]
            coefs = {'dummy' : {'requires' : names,
                                'class' : CoefDummy,}}
            conf.coefs_dummy = coefs
            coefs_name = 'coefs_dummy'
            keys = ['dummy']
        he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
                            compute_only=keys,
                            post_process_hook=self.post_process_hook,
                            multiprocessing=False)
        # The volumes used for coefficient averaging: either given
        # explicitly, or a single total volume (1.0 by default).
        volumes = {}
        if hasattr(opts, 'volumes') and (opts.volumes is not None):
            volumes.update(opts.volumes)
        elif hasattr(opts, 'volume') and (opts.volume is not None):
            volumes['total'] = opts.volume
        else:
            volumes['total'] = 1.0
        he = HomogenizationEngine(self.problem, options,
                                  app_options=he_options,
                                  volumes=volumes)
        coefs = he()
        coefs = Coefficients(**coefs.to_dict())
        # Save the coefficients in a text form.
        coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
        coefs.to_file_txt(coefs_filename + '.txt',
                          opts.tex_names,
                          opts.float_format)
        # Save the band gaps/dispersion logs, if their file names are
        # set in the corresponding coefficients.
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('band_gaps')
                   or key.startswith('dispersion')]
        for ii, key in enumerate(bg_keys):
            bg = coefs.get(key)
            log_save_name = bg.get('log_save_name', None)
            if log_save_name is not None:
                filename = op.join(self.problem.output_dir, log_save_name)
                bg.save_log(filename, opts.float_format, bg)
            raw_log_save_name = bg.get('raw_log_save_name', None)
            if raw_log_save_name is not None:
                filename = op.join(self.problem.output_dir, raw_log_save_name)
                save_raw_bg_logs(filename, bg.logs)
        if options.plot:
            if options.detect_band_gaps:
                self.plot_band_gaps(coefs)
            elif options.analyze_dispersion:
                self.plot_dispersion(coefs)
            elif options.phase_velocity:
                keys = [key for key in coefs.to_dict()
                        if key.startswith('phase_velocity')]
                for key in keys:
                    output('%s:' % key, coefs.get(key))
        return coefs
    def plot_band_gaps(self, coefs):
        """
        Plot figures of band gaps, one figure per 'band_gaps*' coefficient
        found in `coefs`.

        Parameters
        ----------
        coefs : Coefficients
            The computed coefficients with the band gaps data.
        """
        opts = self.app_options
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('band_gaps')]
        plot_opts = opts.plot_options
        plot_rsc = opts.plot_rsc
        plt.rcParams.update(plot_rsc['params'])
        for ii, key in enumerate(bg_keys):
            bg = coefs.get(key)
            # Per-key labels may override the common plot labels.
            plot_labels = opts.plot_labels.get(key, opts.plot_labels)
            # Transform the eigenvalue logs and get the common y-range.
            plot_range, teigs = transform_plot_data(bg.logs.eigs,
                                                    opts.plot_transform,
                                                    self.conf)
            # Band gap rectangles first, then the eigenvalue curves on top.
            fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
                            bg.freq_range_margins, plot_range,
                            clear=True)
            fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
                            bg.valid[bg.eig_range],
                            bg.freq_range_initial,
                            plot_range,
                            show_legend=plot_opts['legend'],
                            new_axes=True)
            plt.tight_layout()
            if opts.fig_name is not None:
                fig_name = _get_fig_name(self.problem.output_dir, opts.fig_name,
                                         key, 'band_gaps', opts.fig_suffix)
                fig.savefig(fig_name)
        if plot_opts['show']:
            plt.show()
def plot_dispersion(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('dispersion')]
plot_rsc = opts.plot_rsc
plot_opts = opts.plot_options
plt.rcParams.update(plot_rsc['params'])
plot_labels = opts.plot_labels_angle
for ii, key in enumerate(bg_keys):
pas_key = key.replace('dispersion', 'polarization_angles')
pas = coefs.get(pas_key)
aux = transform_plot_data(pas,
opts.plot_transform_angle,
self.conf)
plot_range, pas = aux
bg = coefs.get(key)
fig = plot_gaps(1, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(1, plot_rsc, plot_labels, bg.logs.freqs, pas,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
| plt.tight_layout() | sfepy.base.plotutils.plt.tight_layout |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return `getattr(obj, attr)` updated with default values, or `defaults`
    itself if `obj` has no attribute `attr`.

    Parameters
    ----------
    obj : object
        The object to query.
    attr : str
        The attribute name.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values instead of to the value itself.

    Returns
    -------
    values : dict
        The updated values.
    """
    try:
        values = getattr(obj, attr)
    except AttributeError:
        # Only a missing attribute means "use the defaults" - a bare except
        # would also swallow KeyboardInterrupt and genuine errors.
        values = defaults
    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)
        else:
            set_defaults(values, defaults)
    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    The file is a NumPy ``.npz`` archive containing an ``iranges`` array of
    cumulative log lengths plus one concatenated array per log item.
    """
    out = {}
    # Offsets delimiting the individual frequency ranges in the
    # concatenated arrays.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)
    # nm.savez() writes binary (zip) data - the file must be opened in
    # binary mode, otherwise this fails on Python 3.
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Apply an optional user transform to each data array and compute the
    common plotting range.

    Returns
    -------
    (vmin, vmax) : tuple
        The common (min, max) range of the transformed data.
    transformed : list of array
        The transformed data arrays.
    """
    fun = None
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])

    vmin, vmax = 1e+10, -1e+10
    transformed = []
    for item in datas:
        aux = item.copy()
        if fun is not None:
            aux = fun(aux, *plot_transform[1:])
        vmin = min(vmin, nm.nanmin(aux))
        vmax = max(vmax, nm.nanmax(aux))
        transformed.append(aux)

    # Guard against a degenerate (empty) range.
    vmin, vmax = min(vmax - 1e-8, vmin), max(vmin + 1e-8, vmax)

    return (vmin, vmax), transformed
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`

    resonances : red
    masked resonances: dotted red
    """
    if plt is None: return
    assert_(len(valid) == len(freq_range))

    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    ax = fig.add_subplot(111) if new_axes else fig.gca()

    line_ok = line_masked = None
    for ii, freq in enumerate(freq_range):
        style = 'resonance' if valid[ii] else 'masked'
        line = ax.plot([freq, freq], plot_range, **plot_rsc[style])[0]
        if valid[ii]:
            line_ok = line
        else:
            line_masked = line

    # Label only the last line of each kind, so that the legend has a
    # single entry per kind.
    if line_ok:
        line_ok.set_label(plot_labels['resonance'])
    if line_masked:
        line_masked.set_label(plot_labels['masked'])

    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)

    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigs of a mass matrix.

    Parameters
    ----------
    fig_num : int
        The figure number.
    plot_rsc : dict
        The plot resources - line styles per curve kind.
    plot_labels : dict
        The plot labels per curve kind and axis.
    freqs : list of array
        The frequencies per frequency range.
    logs : list of array
        The eigenvalue logs per frequency range; each log has 1 to 3
        columns (min/mid/max eigenvalues).
    valid : array
        The validity flags of the resonance frequencies.
    freq_range : array
        The resonance frequencies.
    plot_range : tuple
        The (min, max) range of the y axis.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    if new_axes:
        ax = fig.add_subplot(111)
    else:
        ax = fig.gca()
    if draw_eigs:
        # Vertical lines at the resonance frequencies.
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)
    for ii, log in enumerate(logs):
        # The last column is always the maximal eigenvalue.
        l1 = ax.plot(freqs[ii], log[:, -1], **plot_rsc['eig_max'])
        if log.shape[1] >= 2:
            l2 = ax.plot(freqs[ii], log[:, 0], **plot_rsc['eig_min'])
        else:
            l2 = None
        if log.shape[1] == 3:
            l3 = ax.plot(freqs[ii], log[:, 1], **plot_rsc['eig_mid'])
        else:
            l3 = None
        # Labels are (re)set in each iteration - the last setting wins, so
        # each curve kind has a single legend entry.
        l1[0].set_label(plot_labels['eig_max'])
        if l2:
            l2[0].set_label(plot_labels['eig_min'])
        if l3:
            l3[0].set_label(plot_labels['eig_mid'])
    fmin, fmax = freqs[0][0], freqs[-1][-1]
    # Horizontal line at zero.
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])
    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)
    if show_legend:
        ax.legend()
    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot single band gap frequency ranges as rectangles.

    The band gap kind determines the sequence of fill styles; the i-th
    style is applied to ``ranges[i]``.
    """
    def draw_rect(ax, x, y, rsc):
        ax.fill(nm.asarray(x)[[0,1,1,0]],
                nm.asarray(y)[[0,0,1,1]],
                **rsc)

    # Colors.
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']

    # Fill style sequence per band gap kind.
    styles = {
        'p' : [propagation],
        'w' : [weak],
        'wp' : [weak, propagation],
        's' : [strong],
        'sw' : [strong, weak],
        'swp' : [strong, weak, propagation],
        'is' : [strong],
        'iw' : [weak],
    }
    if kind not in styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)

    for ii, style in enumerate(styles[kind]):
        draw_rect(ax, ranges[ii], plot_range, style)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
              plot_range, show=False, clear=False, new_axes=False):
    """
    Plot band gaps as rectangles.

    Parameters
    ----------
    fig_num : int
        The figure number.
    plot_rsc : dict
        The plot resources - fill styles of the rectangles.
    gaps : list
        The band gap values per frequency interval; each item is either a
        (gmin, gmax) pair or a list of such pairs.
    kinds : list
        The band gap kinds corresponding to `gaps`.
    gap_ranges : list
        The frequency ranges corresponding to `gaps`.
    freq_range : array
        The frequencies delimiting the intervals; indexed with a list of
        indices, so presumably a numpy array - TODO confirm.
    plot_range : tuple
        The (min, max) range of the y axis.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    if new_axes:
        ax = fig.add_subplot(111)
    else:
        ax = fig.gca()
    for ii in range(len(freq_range) - 1):
        f0, f1 = freq_range[[ii, ii+1]]
        gap = gaps[ii]
        ranges = gap_ranges[ii]
        if isinstance(gap, list):
            # Several band gaps in a single frequency interval.
            for ig, (gmin, gmax) in enumerate(gap):
                kind, kind_desc = kinds[ii][ig]
                plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
                output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
                output(' -> %s\n %s' %(kind_desc, ranges[ig]))
        else:
            gmin, gmax = gap
            kind, kind_desc = kinds[ii]
            plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc)
            output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
            output(' -> %s\n %s' %(kind_desc, ranges))
    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)
    if show:
        plt.show()
    return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
"""
Construct the complete name of figure file.
"""
name = key.replace(common, '')
if name and (not name.startswith('_')):
name = '_' + name
fig_name = fig_name + name + fig_suffix
return op.join(output_dir, fig_name)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
    @staticmethod
    def process_options(options):
        """
        Application options setup. Sets default values for missing
        non-compulsory options.

        Parameters
        ----------
        options : Struct
            The user options from the problem description.

        Returns
        -------
        opts : Struct
            The processed application options.
        """
        get = options.get
        default_plot_options = {'show' : True,'legend' : False,}
        # Default labels for band gaps figures (eigenvalues of M*).
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'min eig($M^*$)',
            'eig_mid' : r'mid eig($M^*$)',
            'eig_max' : r'max eig($M^*$)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : r'eigenvalues of mass matrix $M^*$',
        }
        plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)
        # Default labels for polarization angles figures.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'$\kappa$(min)',
            'eig_mid' : r'$\kappa$(mid)',
            'eig_max' : r'$\kappa$(max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'polarization angles',
        }
        plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)
        # Default labels for wave numbers figures.
        aux = {
            'resonance' : 'eigenfrequencies',
            'masked' : 'masked eigenfrequencies',
            'eig_min' : r'wave number (min)',
            'eig_mid' : r'wave number (mid)',
            'eig_max' : r'wave number (max)',
            'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
            'y_axis' : 'wave numbers',
        }
        plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)
        # Default line/fill styles and matplotlib parameters.
        plot_rsc = {
            'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
            'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
            'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
            'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
                         'linestyle' : ':' },
            'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
                         'linestyle' : '--' },
            'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
                         'linestyle' : '-' },
            'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
            'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
            'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
            'params' : {'axes.labelsize': 'x-large',
                        'font.size': 14,
                        'legend.fontsize': 'large',
                        'legend.loc': 'best',
                        'xtick.labelsize': 'large',
                        'ytick.labelsize': 'large',
                        'text.usetex': True},
        }
        plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)
        return Struct(incident_wave_dir=get('incident_wave_dir', None),
                      plot_transform=get('plot_transform', None),
                      plot_transform_wave=get('plot_transform_wave', None),
                      plot_transform_angle=get('plot_transform_angle', None),
                      plot_options=get('plot_options', default_plot_options),
                      fig_name=get('fig_name', None),
                      fig_name_wave=get('fig_name_wave', None),
                      fig_name_angle=get('fig_name_angle', None),
                      fig_suffix=get('fig_suffix', '.pdf'),
                      plot_labels=plot_labels,
                      plot_labels_angle=plot_labels_angle,
                      plot_labels_wave=plot_labels_wave,
                      plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
"""
Application options setup for phase velocity computation. Sets default
values for missing non-compulsory options.
"""
get = options.get
incident_wave_dir=get('incident_wave_dir', None,
'missing "incident_wave_dir" in options!')
return Struct(incident_wave_dir=incident_wave_dir)
    def __init__(self, conf, options, output_prefix, **kwargs):
        """
        Create the application, postponing equation initialization, and
        copy the problem description file into the output directory.
        """
        PDESolverApp.__init__(self, conf, options, output_prefix,
                              init_equations=False)
        self.setup_options()
        if conf._filename:
            # Keep a copy of the problem description with the results.
            output_dir = self.problem.output_dir
            shutil.copyfile(conf._filename,
                            op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
HomogenizationApp.setup_options(self)
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options(self.conf.options)
    def call(self):
        """
        Construct and call the homogenization engine according to options.

        Returns
        -------
        coefs : Coefficients
            The computed homogenized coefficients.
        """
        options = self.options
        opts = self.app_options
        conf = self.problem.conf
        coefs_name = opts.coefs
        coef_info = conf.get(opts.coefs, None,
                             'missing "%s" in problem description!'
                             % opts.coefs)
        if options.detect_band_gaps:
            # Compute band gaps coefficients and data.
            keys = [key for key in coef_info if key.startswith('band_gaps')]
        elif options.analyze_dispersion or options.phase_velocity:
            # Insert incident wave direction to coefficients that need it.
            for key, val in six.iteritems(coef_info):
                coef_opts = val.get('options', None)
                if coef_opts is None: continue
                if (('incident_wave_dir' in coef_opts)
                    and (coef_opts['incident_wave_dir'] is None)):
                    coef_opts['incident_wave_dir'] = opts.incident_wave_dir
            if options.analyze_dispersion:
                # Compute dispersion coefficients and data.
                keys = [key for key in coef_info
                        if key.startswith('dispersion')
                        or key.startswith('polarization_angles')]
            else:
                # Compute phase velocity and its requirements.
                keys = [key for key in coef_info
                        if key.startswith('phase_velocity')]
        else:
            # Compute only the eigenvalue problems.
            names = [req for req in conf.get(opts.requirements, [''])
                     if req.startswith('evp')]
            coefs = {'dummy' : {'requires' : names,
                                'class' : CoefDummy,}}
            conf.coefs_dummy = coefs
            coefs_name = 'coefs_dummy'
            keys = ['dummy']
        # The engine computes only the selected coefficients.
        he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
                            compute_only=keys,
                            post_process_hook=self.post_process_hook,
                            multiprocessing=False)
        # Volumes: given explicitly, as a single total volume, or 1.0.
        volumes = {}
        if hasattr(opts, 'volumes') and (opts.volumes is not None):
            volumes.update(opts.volumes)
        elif hasattr(opts, 'volume') and (opts.volume is not None):
            volumes['total'] = opts.volume
        else:
            volumes['total'] = 1.0
        he = HomogenizationEngine(self.problem, options,
                                  app_options=he_options,
                                  volumes=volumes)
        coefs = he()
        coefs = Coefficients(**coefs.to_dict())
        coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
        coefs.to_file_txt(coefs_filename + '.txt',
                          opts.tex_names,
                          opts.float_format)
        # Save band gaps/dispersion logs when the coefficients request it.
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('band_gaps')
                   or key.startswith('dispersion')]
        for ii, key in enumerate(bg_keys):
            bg = coefs.get(key)
            log_save_name = bg.get('log_save_name', None)
            if log_save_name is not None:
                filename = op.join(self.problem.output_dir, log_save_name)
                bg.save_log(filename, opts.float_format, bg)
            raw_log_save_name = bg.get('raw_log_save_name', None)
            if raw_log_save_name is not None:
                filename = op.join(self.problem.output_dir, raw_log_save_name)
                save_raw_bg_logs(filename, bg.logs)
        if options.plot:
            if options.detect_band_gaps:
                self.plot_band_gaps(coefs)
            elif options.analyze_dispersion:
                self.plot_dispersion(coefs)
            elif options.phase_velocity:
                # No figures - just print the phase velocities.
                keys = [key for key in coefs.to_dict()
                        if key.startswith('phase_velocity')]
                for key in keys:
                    output('%s:' % key, coefs.get(key))
        return coefs
    def plot_band_gaps(self, coefs):
        """
        Plot figures of band gaps, one figure per 'band_gaps*' coefficient
        found in `coefs`.

        Parameters
        ----------
        coefs : Coefficients
            The computed coefficients with the band gaps data.
        """
        opts = self.app_options
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('band_gaps')]
        plot_opts = opts.plot_options
        plot_rsc = opts.plot_rsc
        plt.rcParams.update(plot_rsc['params'])
        for ii, key in enumerate(bg_keys):
            bg = coefs.get(key)
            # Per-key labels may override the common plot labels.
            plot_labels = opts.plot_labels.get(key, opts.plot_labels)
            # Transform the eigenvalue logs and get the common y-range.
            plot_range, teigs = transform_plot_data(bg.logs.eigs,
                                                    opts.plot_transform,
                                                    self.conf)
            # Band gap rectangles first, then the eigenvalue curves on top.
            fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
                            bg.freq_range_margins, plot_range,
                            clear=True)
            fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
                            bg.valid[bg.eig_range],
                            bg.freq_range_initial,
                            plot_range,
                            show_legend=plot_opts['legend'],
                            new_axes=True)
            plt.tight_layout()
            if opts.fig_name is not None:
                fig_name = _get_fig_name(self.problem.output_dir, opts.fig_name,
                                         key, 'band_gaps', opts.fig_suffix)
                fig.savefig(fig_name)
        if plot_opts['show']:
            plt.show()
def plot_dispersion(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('dispersion')]
plot_rsc = opts.plot_rsc
plot_opts = opts.plot_options
plt.rcParams.update(plot_rsc['params'])
plot_labels = opts.plot_labels_angle
for ii, key in enumerate(bg_keys):
pas_key = key.replace('dispersion', 'polarization_angles')
pas = coefs.get(pas_key)
aux = transform_plot_data(pas,
opts.plot_transform_angle,
self.conf)
plot_range, pas = aux
bg = coefs.get(key)
fig = plot_gaps(1, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(1, plot_rsc, plot_labels, bg.logs.freqs, pas,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
plt.tight_layout()
fig_name = opts.fig_name_angle
if fig_name is not None:
fig_name = _get_fig_name(self.problem.output_dir, fig_name,
key, 'dispersion', opts.fig_suffix)
fig.savefig(fig_name)
aux = transform_plot_data(bg.logs.eigs,
opts.plot_transform_wave,
self.conf)
plot_range, teigs = aux
plot_labels = opts.plot_labels_wave
fig = plot_gaps(2, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(2, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
| plt.tight_layout() | sfepy.base.plotutils.plt.tight_layout |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return `getattr(obj, attr)` updated with default values, or `defaults`
    itself if `obj` has no attribute `attr`.

    Parameters
    ----------
    obj : object
        The object to query.
    attr : str
        The attribute name.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values instead of to the value itself.

    Returns
    -------
    values : dict
        The updated values.
    """
    try:
        values = getattr(obj, attr)
    except AttributeError:
        # Only a missing attribute means "use the defaults" - a bare except
        # would also swallow KeyboardInterrupt and genuine errors.
        values = defaults
    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)
        else:
            set_defaults(values, defaults)
    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file.

    The file is a NumPy ``.npz`` archive containing an ``iranges`` array of
    cumulative log lengths plus one concatenated array per log item.
    """
    out = {}
    # Offsets delimiting the individual frequency ranges in the
    # concatenated arrays.
    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)
    # nm.savez() writes binary (zip) data - the file must be opened in
    # binary mode, otherwise this fails on Python 3.
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
if plot_transform is not None:
fun = conf.get_function(plot_transform[0])
dmin, dmax = 1e+10, -1e+10
tdatas = []
for data in datas:
tdata = data.copy()
if plot_transform is not None:
tdata = fun(tdata, *plot_transform[1:])
dmin = min(dmin, nm.nanmin(tdata))
dmax = max(dmax, nm.nanmax(tdata))
tdatas.append(tdata)
dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)
return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
show=False, clear=False, new_axes=False):
"""
Plot resonance/eigen-frequencies.
`valid` must correspond to `freq_range`
resonances : red
masked resonances: dotted red
"""
if plt is None: return
assert_(len(valid) == len(freq_range))
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
l0 = l1 = None
for ii, f in enumerate(freq_range):
if valid[ii]:
l0 = ax.plot([f, f], plot_range, **plot_rsc['resonance'])[0]
else:
l1 = ax.plot([f, f], plot_range, **plot_rsc['masked'])[0]
if l0:
l0.set_label(plot_labels['resonance'])
if l1:
l1.set_label(plot_labels['masked'])
if new_axes:
ax.set_xlim([freq_range[0], freq_range[-1]])
ax.set_ylim(plot_range)
if show:
plt.show()
return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
freqs, logs, valid, freq_range, plot_range,
draw_eigs=True, show_legend=True, show=False,
clear=False, new_axes=False):
"""
Plot logs of min/middle/max eigs of a mass matrix.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
if draw_eigs:
plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
plot_range)
for ii, log in enumerate(logs):
l1 = ax.plot(freqs[ii], log[:, -1], **plot_rsc['eig_max'])
if log.shape[1] >= 2:
l2 = ax.plot(freqs[ii], log[:, 0], **plot_rsc['eig_min'])
else:
l2 = None
if log.shape[1] == 3:
l3 = ax.plot(freqs[ii], log[:, 1], **plot_rsc['eig_mid'])
else:
l3 = None
l1[0].set_label(plot_labels['eig_max'])
if l2:
l2[0].set_label(plot_labels['eig_min'])
if l3:
l3[0].set_label(plot_labels['eig_mid'])
fmin, fmax = freqs[0][0], freqs[-1][-1]
ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
ax.set_xlabel(plot_labels['x_axis'])
ax.set_ylabel(plot_labels['y_axis'])
if new_axes:
ax.set_xlim([fmin, fmax])
ax.set_ylim(plot_range)
if show_legend:
ax.legend()
if show:
plt.show()
return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
"""
Plot single band gap frequency ranges as rectangles.
"""
def draw_rect(ax, x, y, rsc):
ax.fill(nm.asarray(x)[[0,1,1,0]],
nm.asarray(y)[[0,0,1,1]],
**rsc)
# Colors.
strong = plot_rsc['strong_gap']
weak = plot_rsc['weak_gap']
propagation = plot_rsc['propagation']
if kind == 'p':
draw_rect(ax, ranges[0], plot_range, propagation)
elif kind == 'w':
draw_rect(ax, ranges[0], plot_range, weak)
elif kind == 'wp':
draw_rect(ax, ranges[0], plot_range, weak)
draw_rect(ax, ranges[1], plot_range, propagation)
elif kind == 's':
draw_rect(ax, ranges[0], plot_range, strong)
elif kind == 'sw':
draw_rect(ax, ranges[0], plot_range, strong)
draw_rect(ax, ranges[1], plot_range, weak)
elif kind == 'swp':
draw_rect(ax, ranges[0], plot_range, strong)
draw_rect(ax, ranges[1], plot_range, weak)
draw_rect(ax, ranges[2], plot_range, propagation)
elif kind == 'is':
draw_rect(ax, ranges[0], plot_range, strong)
elif kind == 'iw':
draw_rect(ax, ranges[0], plot_range, weak)
else:
raise ValueError('unknown band gap kind! (%s)' % kind)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
plot_range, show=False, clear=False, new_axes=False):
"""
Plot band gaps as rectangles.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
for ii in range(len(freq_range) - 1):
f0, f1 = freq_range[[ii, ii+1]]
gap = gaps[ii]
ranges = gap_ranges[ii]
if isinstance(gap, list):
for ig, (gmin, gmax) in enumerate(gap):
kind, kind_desc = kinds[ii][ig]
plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
output(' -> %s\n %s' %(kind_desc, ranges[ig]))
else:
gmin, gmax = gap
kind, kind_desc = kinds[ii]
plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
output(' -> %s\n %s' %(kind_desc, ranges))
if new_axes:
ax.set_xlim([freq_range[0], freq_range[-1]])
ax.set_ylim(plot_range)
if show:
plt.show()
return fig
def _get_fig_name(output_dir, fig_name, key, common, fig_suffix):
"""
Construct the complete name of figure file.
"""
name = key.replace(common, '')
if name and (not name.startswith('_')):
name = '_' + name
fig_name = fig_name + name + fig_suffix
return op.join(output_dir, fig_name)
class AcousticBandGapsApp(HomogenizationApp):
"""
Application for computing acoustic band gaps.
"""
@staticmethod
def process_options(options):
"""
Application options setup. Sets default values for missing
non-compulsory options.
"""
get = options.get
default_plot_options = {'show' : True,'legend' : False,}
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'min eig($M^*$)',
'eig_mid' : r'mid eig($M^*$)',
'eig_max' : r'max eig($M^*$)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : r'eigenvalues of mass matrix $M^*$',
}
plot_labels = try_set_defaults(options, 'plot_labels', aux, recur=True)
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'$\kappa$(min)',
'eig_mid' : r'$\kappa$(mid)',
'eig_max' : r'$\kappa$(max)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : 'polarization angles',
}
plot_labels_angle = try_set_defaults(options, 'plot_labels_angle', aux)
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'wave number (min)',
'eig_mid' : r'wave number (mid)',
'eig_max' : r'wave number (max)',
'x_axis' : r'$\sqrt{\lambda}$, $\omega$',
'y_axis' : 'wave numbers',
}
plot_labels_wave = try_set_defaults(options, 'plot_labels_wave', aux)
plot_rsc = {
'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-'},
'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':'},
'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--'},
'eig_min' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 1.0),
'linestyle' : ':' },
'eig_mid' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.8),
'linestyle' : '--' },
'eig_max' : {'linewidth' : 2.0, 'color' : (0.0, 0.0, 0.6),
'linestyle' : '-' },
'strong_gap' : {'linewidth' : 0, 'facecolor' : (0.2, 0.4, 0.2)},
'weak_gap' : {'linewidth' : 0, 'facecolor' : (0.6, 0.8, 0.6)},
'propagation' : {'linewidth' : 0, 'facecolor' : (1, 1, 1)},
'params' : {'axes.labelsize': 'x-large',
'font.size': 14,
'legend.fontsize': 'large',
'legend.loc': 'best',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'text.usetex': True},
}
plot_rsc = try_set_defaults(options, 'plot_rsc', plot_rsc)
return Struct(incident_wave_dir=get('incident_wave_dir', None),
plot_transform=get('plot_transform', None),
plot_transform_wave=get('plot_transform_wave', None),
plot_transform_angle=get('plot_transform_angle', None),
plot_options=get('plot_options', default_plot_options),
fig_name=get('fig_name', None),
fig_name_wave=get('fig_name_wave', None),
fig_name_angle=get('fig_name_angle', None),
fig_suffix=get('fig_suffix', '.pdf'),
plot_labels=plot_labels,
plot_labels_angle=plot_labels_angle,
plot_labels_wave=plot_labels_wave,
plot_rsc=plot_rsc)
@staticmethod
def process_options_pv(options):
"""
Application options setup for phase velocity computation. Sets default
values for missing non-compulsory options.
"""
get = options.get
incident_wave_dir=get('incident_wave_dir', None,
'missing "incident_wave_dir" in options!')
return Struct(incident_wave_dir=incident_wave_dir)
def __init__(self, conf, options, output_prefix, **kwargs):
PDESolverApp.__init__(self, conf, options, output_prefix,
init_equations=False)
self.setup_options()
if conf._filename:
output_dir = self.problem.output_dir
shutil.copyfile(conf._filename,
op.join(output_dir, op.basename(conf._filename)))
def setup_options(self):
HomogenizationApp.setup_options(self)
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options(self.conf.options)
def call(self):
"""
Construct and call the homogenization engine accoring to options.
"""
options = self.options
opts = self.app_options
conf = self.problem.conf
coefs_name = opts.coefs
coef_info = conf.get(opts.coefs, None,
'missing "%s" in problem description!'
% opts.coefs)
if options.detect_band_gaps:
# Compute band gaps coefficients and data.
keys = [key for key in coef_info if key.startswith('band_gaps')]
elif options.analyze_dispersion or options.phase_velocity:
# Insert incident wave direction to coefficients that need it.
for key, val in six.iteritems(coef_info):
coef_opts = val.get('options', None)
if coef_opts is None: continue
if (('incident_wave_dir' in coef_opts)
and (coef_opts['incident_wave_dir'] is None)):
coef_opts['incident_wave_dir'] = opts.incident_wave_dir
if options.analyze_dispersion:
# Compute dispersion coefficients and data.
keys = [key for key in coef_info
if key.startswith('dispersion')
or key.startswith('polarization_angles')]
else:
# Compute phase velocity and its requirements.
keys = [key for key in coef_info
if key.startswith('phase_velocity')]
else:
# Compute only the eigenvalue problems.
names = [req for req in conf.get(opts.requirements, [''])
if req.startswith('evp')]
coefs = {'dummy' : {'requires' : names,
'class' : CoefDummy,}}
conf.coefs_dummy = coefs
coefs_name = 'coefs_dummy'
keys = ['dummy']
he_options = Struct(coefs=coefs_name, requirements=opts.requirements,
compute_only=keys,
post_process_hook=self.post_process_hook,
multiprocessing=False)
volumes = {}
if hasattr(opts, 'volumes') and (opts.volumes is not None):
volumes.update(opts.volumes)
elif hasattr(opts, 'volume') and (opts.volume is not None):
volumes['total'] = opts.volume
else:
volumes['total'] = 1.0
he = HomogenizationEngine(self.problem, options,
app_options=he_options,
volumes=volumes)
coefs = he()
coefs = Coefficients(**coefs.to_dict())
coefs_filename = op.join(opts.output_dir, opts.coefs_filename)
coefs.to_file_txt(coefs_filename + '.txt',
opts.tex_names,
opts.float_format)
bg_keys = [key for key in coefs.to_dict()
if key.startswith('band_gaps')
or key.startswith('dispersion')]
for ii, key in enumerate(bg_keys):
bg = coefs.get(key)
log_save_name = bg.get('log_save_name', None)
if log_save_name is not None:
filename = op.join(self.problem.output_dir, log_save_name)
bg.save_log(filename, opts.float_format, bg)
raw_log_save_name = bg.get('raw_log_save_name', None)
if raw_log_save_name is not None:
filename = op.join(self.problem.output_dir, raw_log_save_name)
save_raw_bg_logs(filename, bg.logs)
if options.plot:
if options.detect_band_gaps:
self.plot_band_gaps(coefs)
elif options.analyze_dispersion:
self.plot_dispersion(coefs)
elif options.phase_velocity:
keys = [key for key in coefs.to_dict()
if key.startswith('phase_velocity')]
for key in keys:
output('%s:' % key, coefs.get(key))
return coefs
def plot_band_gaps(self, coefs):
opts = self.app_options
bg_keys = [key for key in coefs.to_dict()
if key.startswith('band_gaps')]
plot_opts = opts.plot_options
plot_rsc = opts.plot_rsc
plt.rcParams.update(plot_rsc['params'])
for ii, key in enumerate(bg_keys):
bg = coefs.get(key)
plot_labels = opts.plot_labels.get(key, opts.plot_labels)
plot_range, teigs = transform_plot_data(bg.logs.eigs,
opts.plot_transform,
self.conf)
fig = plot_gaps(ii, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
bg.freq_range_margins, plot_range,
clear=True)
fig = plot_logs(ii, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range,
show_legend=plot_opts['legend'],
new_axes=True)
plt.tight_layout()
if opts.fig_name is not None:
fig_name = _get_fig_name(self.problem.output_dir, opts.fig_name,
key, 'band_gaps', opts.fig_suffix)
fig.savefig(fig_name)
if plot_opts['show']:
plt.show()
    def plot_dispersion(self, coefs):
        """
        Plot dispersion figures - polarization angles and wave numbers -
        one pair of figures per 'dispersion*' coefficient found in `coefs`.

        Parameters
        ----------
        coefs : Coefficients
            The computed coefficients with the dispersion data.
        """
        opts = self.app_options
        bg_keys = [key for key in coefs.to_dict()
                   if key.startswith('dispersion')]
        plot_rsc = opts.plot_rsc
        plot_opts = opts.plot_options
        plt.rcParams.update(plot_rsc['params'])
        plot_labels = opts.plot_labels_angle
        for ii, key in enumerate(bg_keys):
            # Figure 1: polarization angles for this dispersion key.
            pas_key = key.replace('dispersion', 'polarization_angles')
            pas = coefs.get(pas_key)
            aux = transform_plot_data(pas,
                                      opts.plot_transform_angle,
                                      self.conf)
            plot_range, pas = aux
            bg = coefs.get(key)
            fig = plot_gaps(1, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
                            bg.freq_range_margins, plot_range,
                            clear=True)
            fig = plot_logs(1, plot_rsc, plot_labels, bg.logs.freqs, pas,
                            bg.valid[bg.eig_range],
                            bg.freq_range_initial,
                            plot_range,
                            show_legend=plot_opts['legend'],
                            new_axes=True)
            plt.tight_layout()
            fig_name = opts.fig_name_angle
            if fig_name is not None:
                fig_name = _get_fig_name(self.problem.output_dir, fig_name,
                                         key, 'dispersion', opts.fig_suffix)
                fig.savefig(fig_name)
            # Figure 2: wave numbers (transformed eigenvalue logs).
            aux = transform_plot_data(bg.logs.eigs,
                                      opts.plot_transform_wave,
                                      self.conf)
            plot_range, teigs = aux
            plot_labels = opts.plot_labels_wave
            fig = plot_gaps(2, plot_rsc, bg.gaps, bg.kinds, bg.gap_ranges,
                            bg.freq_range_margins, plot_range,
                            clear=True)
            fig = plot_logs(2, plot_rsc, plot_labels, bg.logs.freqs, teigs,
                            bg.valid[bg.eig_range],
                            bg.freq_range_initial,
                            plot_range,
                            show_legend=plot_opts['legend'],
                            new_axes=True)
            plt.tight_layout()
            fig_name = opts.fig_name_wave
            if fig_name is not None:
                fig_name = _get_fig_name(self.problem.output_dir, fig_name,
                                         key, 'dispersion', opts.fig_suffix)
                fig.savefig(fig_name)
        if plot_opts['show']:
            plt.show()
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
try:
values = getattr(obj, attr)
except:
values = defaults
else:
if recur and isinstance(values, dict):
for key, val in six.iteritems(values):
| set_defaults(val, defaults) | sfepy.base.base.set_defaults |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
    """
    Return ``getattr(obj, attr)`` updated with `defaults`.

    If `obj` has no attribute `attr`, return `defaults` unchanged.
    Otherwise fill missing keys of the attribute value from `defaults`
    (when `recur` is True and the value is a dict, fill each of its
    values instead) and return it.

    Parameters
    ----------
    obj : object
        The object to query.
    attr : str
        The attribute name.
    defaults : dict
        The default values.
    recur : bool
        If True and the attribute value is a dict, apply the defaults to
        each of its values.
    """
    try:
        values = getattr(obj, attr)
    except AttributeError:
        # Only a missing attribute means "use the defaults as given" - the
        # original bare except also hid unrelated errors (e.g. a raising
        # property), masking real bugs.
        values = defaults
    else:
        if recur and isinstance(values, dict):
            for key, val in six.iteritems(values):
                set_defaults(val, defaults)
        else:
            set_defaults(values, defaults)

    return values
def save_raw_bg_logs(filename, logs):
    """
    Save raw band gaps `logs` into the `filename` file in the NumPy
    ``.npz`` format.

    Parameters
    ----------
    filename : str
        The output file name.
    logs : Struct
        The raw band gap logs; its ``freqs`` attribute and the items of
        ``logs.to_dict()`` are sequences of arrays, one per frequency
        range.

    Notes
    -----
    ``out['iranges']`` holds the cumulative offsets of the individual
    frequency ranges within the concatenated arrays.
    """
    out = {}

    iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
    out['iranges'] = iranges
    for key, log in ordered_iteritems(logs.to_dict()):
        out[key] = nm.concatenate(log, axis=0)

    # nm.savez() writes binary data - the file must be opened in binary
    # mode ('w' alone breaks on Python 3 with a TypeError).
    with open(filename, 'wb') as fd:
        nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
    """
    Copy the data arrays in `datas`, optionally transforming each copy by
    the configuration function named in ``plot_transform[0]`` (called
    with ``plot_transform[1:]`` as extra arguments), and gather the
    overall value range.

    Returns
    -------
    plot_range : (float, float)
        The (min, max) over all transformed data, widened so the two
        bounds can never coincide.
    tdatas : list
        The transformed copies of the arrays.
    """
    if plot_transform is not None:
        fun = conf.get_function(plot_transform[0])

    tdatas = []
    dmin, dmax = 1e+10, -1e+10
    for data in datas:
        tdata = data.copy()
        if plot_transform is not None:
            tdata = fun(tdata, *plot_transform[1:])

        tdatas.append(tdata)
        # NaNs are ignored when accumulating the range.
        dmin = min(dmin, nm.nanmin(tdata))
        dmax = max(dmax, nm.nanmax(tdata))

    # Widen a degenerate range so that plot limits are never equal.
    plot_range = (min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax))
    return plot_range, tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
              show=False, clear=False, new_axes=False):
    """
    Plot resonance/eigen-frequencies as vertical lines.

    `valid` must correspond to `freq_range`

    resonances : red
    masked resonances: dotted red

    Parameters
    ----------
    fig_num : int
        The matplotlib figure number to draw into.
    plot_rsc : dict
        Style keyword arguments for ``ax.plot()`` under the 'resonance'
        and 'masked' keys.
    plot_labels : dict
        Legend labels under the 'resonance' and 'masked' keys.
    valid : sequence of bool
        For each entry of `freq_range`: True for a valid resonance,
        False for a masked one.
    freq_range : sequence of float
        The frequencies to mark.
    plot_range : (float, float)
        The y-extent of the plotted vertical lines.
    show : bool
        If True, call ``plt.show()`` at the end.
    clear : bool
        If True, clear the figure before plotting.
    new_axes : bool
        If True, create new axes and set the axis limits.

    Returns
    -------
    fig : matplotlib Figure or None
        The figure, or None when matplotlib is not importable.
    """
    # Matplotlib is optional - bail out silently when it is missing.
    if plt is None: return
    assert_(len(valid) == len(freq_range))
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    if new_axes:
        ax = fig.add_subplot(111)
    else:
        ax = fig.gca()
    # Keep the last drawn line of each kind so that each label is set on
    # a single line only.
    l0 = l1 = None
    for ii, f in enumerate(freq_range):
        if valid[ii]:
            l0 = ax.plot([f, f], plot_range, **plot_rsc['resonance'])[0]
        else:
            l1 = ax.plot([f, f], plot_range, **plot_rsc['masked'])[0]
    if l0:
        l0.set_label(plot_labels['resonance'])
    if l1:
        l1.set_label(plot_labels['masked'])
    if new_axes:
        ax.set_xlim([freq_range[0], freq_range[-1]])
        ax.set_ylim(plot_range)
    if show:
        plt.show()
    return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
              freqs, logs, valid, freq_range, plot_range,
              draw_eigs=True, show_legend=True, show=False,
              clear=False, new_axes=False):
    """
    Plot logs of min/middle/max eigs of a mass matrix.

    Parameters
    ----------
    fig_num : int
        The matplotlib figure number to draw into.
    plot_rsc : dict
        Style keyword arguments under the 'eig_min', 'eig_mid',
        'eig_max' and 'x_axis' keys.
    plot_labels : dict
        Labels under the 'eig_min', 'eig_mid', 'eig_max', 'x_axis' and
        'y_axis' keys.
    freqs : sequence of array
        For each frequency range, the frequencies the eigenvalues were
        sampled at.
    logs : sequence of 2D array
        For each frequency range, the sampled eigenvalues: column 0 is
        the minimum, the last column the maximum and, when three columns
        are present, column 1 the middle eigenvalue.
    valid : sequence of bool
        Validity flags passed to :func:`plot_eigs()`.
    freq_range : sequence of float
        Resonance frequencies passed to :func:`plot_eigs()`.
    plot_range : (float, float)
        The y-axis extent.
    draw_eigs : bool
        If True, also draw the resonances via :func:`plot_eigs()`.
    show_legend : bool
        If True, add the axes legend.
    show : bool
        If True, call ``plt.show()`` at the end.
    clear : bool
        If True, clear the figure before plotting.
    new_axes : bool
        If True, create new axes and set the axis limits.

    Returns
    -------
    fig : matplotlib Figure or None
        The figure, or None when matplotlib is not importable.
    """
    if plt is None: return
    fig = plt.figure(fig_num)
    if clear:
        fig.clf()
    if new_axes:
        ax = fig.add_subplot(111)
    else:
        ax = fig.gca()
    if draw_eigs:
        plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
                  plot_range)
    for ii, log in enumerate(logs):
        # The last column is always the maximum eigenvalue; the minimum
        # and middle columns are drawn only when present.
        l1 = ax.plot(freqs[ii], log[:, -1], **plot_rsc['eig_max'])
        if log.shape[1] >= 2:
            l2 = ax.plot(freqs[ii], log[:, 0], **plot_rsc['eig_min'])
        else:
            l2 = None
        if log.shape[1] == 3:
            l3 = ax.plot(freqs[ii], log[:, 1], **plot_rsc['eig_mid'])
        else:
            l3 = None
        # Labels are (re)assigned for each range's lines.
        l1[0].set_label(plot_labels['eig_max'])
        if l2:
            l2[0].set_label(plot_labels['eig_min'])
        if l3:
            l3[0].set_label(plot_labels['eig_mid'])
    # Draw the zero line over the whole sampled frequency span.
    fmin, fmax = freqs[0][0], freqs[-1][-1]
    ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
    ax.set_xlabel(plot_labels['x_axis'])
    ax.set_ylabel(plot_labels['y_axis'])
    if new_axes:
        ax.set_xlim([fmin, fmax])
        ax.set_ylim(plot_range)
    if show_legend:
        ax.legend()
    if show:
        plt.show()
    return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
    """
    Plot single band gap frequency ranges as rectangles.

    The gap `kind` selects how many rectangles are drawn and which plot
    resources ('strong_gap', 'weak_gap', 'propagation') style them;
    ``ranges[i]`` gives the x-extent of the i-th rectangle.  `kind_desc`
    is unused here and kept for interface compatibility.

    Raises
    ------
    ValueError
        If `kind` is not a known band gap kind.
    """
    def draw_rect(ax, x, y, rsc):
        # A filled rectangle spanning x[0]..x[1] horizontally and
        # y[0]..y[1] vertically.
        ax.fill(nm.asarray(x)[[0, 1, 1, 0]],
                nm.asarray(y)[[0, 0, 1, 1]],
                **rsc)

    # Colors.
    strong = plot_rsc['strong_gap']
    weak = plot_rsc['weak_gap']
    propagation = plot_rsc['propagation']

    # The sequence of rectangle styles drawn for each gap kind.
    rect_styles = {
        'p' : [propagation],
        'w' : [weak],
        'wp' : [weak, propagation],
        's' : [strong],
        'sw' : [strong, weak],
        'swp' : [strong, weak, propagation],
        'is' : [strong],
        'iw' : [weak],
    }
    if kind not in rect_styles:
        raise ValueError('unknown band gap kind! (%s)' % kind)

    for ii, rsc in enumerate(rect_styles[kind]):
        draw_rect(ax, ranges[ii], plot_range, rsc)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
plot_range, show=False, clear=False, new_axes=False):
"""
Plot band gaps as rectangles.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
for ii in range(len(freq_range) - 1):
f0, f1 = freq_range[[ii, ii+1]]
gap = gaps[ii]
ranges = gap_ranges[ii]
if isinstance(gap, list):
for ig, (gmin, gmax) in enumerate(gap):
kind, kind_desc = kinds[ii][ig]
plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
| output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1) | sfepy.base.base.output |
from __future__ import absolute_import
import os.path as op
import shutil
import numpy as nm
from sfepy.base.base import ordered_iteritems, output, set_defaults, assert_
from sfepy.base.base import Struct
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.homogenization.coefficients import Coefficients
from sfepy.homogenization.coefs_base import CoefDummy
from sfepy.applications import PDESolverApp
from sfepy.base.plotutils import plt
import six
from six.moves import range
def try_set_defaults(obj, attr, defaults, recur=False):
try:
values = getattr(obj, attr)
except:
values = defaults
else:
if recur and isinstance(values, dict):
for key, val in six.iteritems(values):
set_defaults(val, defaults)
else:
set_defaults(values, defaults)
return values
def save_raw_bg_logs(filename, logs):
"""
Save raw band gaps `logs` into the `filename` file.
"""
out = {}
iranges = nm.cumsum([0] + [len(ii) for ii in logs.freqs])
out['iranges'] = iranges
for key, log in ordered_iteritems(logs.to_dict()):
out[key] = nm.concatenate(log, axis=0)
with open(filename, 'w') as fd:
nm.savez(fd, **out)
def transform_plot_data(datas, plot_transform, conf):
if plot_transform is not None:
fun = conf.get_function(plot_transform[0])
dmin, dmax = 1e+10, -1e+10
tdatas = []
for data in datas:
tdata = data.copy()
if plot_transform is not None:
tdata = fun(tdata, *plot_transform[1:])
dmin = min(dmin, nm.nanmin(tdata))
dmax = max(dmax, nm.nanmax(tdata))
tdatas.append(tdata)
dmin, dmax = min(dmax - 1e-8, dmin), max(dmin + 1e-8, dmax)
return (dmin, dmax), tdatas
def plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range, plot_range,
show=False, clear=False, new_axes=False):
"""
Plot resonance/eigen-frequencies.
`valid` must correspond to `freq_range`
resonances : red
masked resonances: dotted red
"""
if plt is None: return
assert_(len(valid) == len(freq_range))
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
l0 = l1 = None
for ii, f in enumerate(freq_range):
if valid[ii]:
l0 = ax.plot([f, f], plot_range, **plot_rsc['resonance'])[0]
else:
l1 = ax.plot([f, f], plot_range, **plot_rsc['masked'])[0]
if l0:
l0.set_label(plot_labels['resonance'])
if l1:
l1.set_label(plot_labels['masked'])
if new_axes:
ax.set_xlim([freq_range[0], freq_range[-1]])
ax.set_ylim(plot_range)
if show:
plt.show()
return fig
def plot_logs(fig_num, plot_rsc, plot_labels,
freqs, logs, valid, freq_range, plot_range,
draw_eigs=True, show_legend=True, show=False,
clear=False, new_axes=False):
"""
Plot logs of min/middle/max eigs of a mass matrix.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
if draw_eigs:
plot_eigs(fig_num, plot_rsc, plot_labels, valid, freq_range,
plot_range)
for ii, log in enumerate(logs):
l1 = ax.plot(freqs[ii], log[:, -1], **plot_rsc['eig_max'])
if log.shape[1] >= 2:
l2 = ax.plot(freqs[ii], log[:, 0], **plot_rsc['eig_min'])
else:
l2 = None
if log.shape[1] == 3:
l3 = ax.plot(freqs[ii], log[:, 1], **plot_rsc['eig_mid'])
else:
l3 = None
l1[0].set_label(plot_labels['eig_max'])
if l2:
l2[0].set_label(plot_labels['eig_min'])
if l3:
l3[0].set_label(plot_labels['eig_mid'])
fmin, fmax = freqs[0][0], freqs[-1][-1]
ax.plot([fmin, fmax], [0, 0], **plot_rsc['x_axis'])
ax.set_xlabel(plot_labels['x_axis'])
ax.set_ylabel(plot_labels['y_axis'])
if new_axes:
ax.set_xlim([fmin, fmax])
ax.set_ylim(plot_range)
if show_legend:
ax.legend()
if show:
plt.show()
return fig
def plot_gap(ax, ranges, kind, kind_desc, plot_range, plot_rsc):
"""
Plot single band gap frequency ranges as rectangles.
"""
def draw_rect(ax, x, y, rsc):
ax.fill(nm.asarray(x)[[0,1,1,0]],
nm.asarray(y)[[0,0,1,1]],
**rsc)
# Colors.
strong = plot_rsc['strong_gap']
weak = plot_rsc['weak_gap']
propagation = plot_rsc['propagation']
if kind == 'p':
draw_rect(ax, ranges[0], plot_range, propagation)
elif kind == 'w':
draw_rect(ax, ranges[0], plot_range, weak)
elif kind == 'wp':
draw_rect(ax, ranges[0], plot_range, weak)
draw_rect(ax, ranges[1], plot_range, propagation)
elif kind == 's':
draw_rect(ax, ranges[0], plot_range, strong)
elif kind == 'sw':
draw_rect(ax, ranges[0], plot_range, strong)
draw_rect(ax, ranges[1], plot_range, weak)
elif kind == 'swp':
draw_rect(ax, ranges[0], plot_range, strong)
draw_rect(ax, ranges[1], plot_range, weak)
draw_rect(ax, ranges[2], plot_range, propagation)
elif kind == 'is':
draw_rect(ax, ranges[0], plot_range, strong)
elif kind == 'iw':
draw_rect(ax, ranges[0], plot_range, weak)
else:
raise ValueError('unknown band gap kind! (%s)' % kind)
def plot_gaps(fig_num, plot_rsc, gaps, kinds, gap_ranges, freq_range,
plot_range, show=False, clear=False, new_axes=False):
"""
Plot band gaps as rectangles.
"""
if plt is None: return
fig = plt.figure(fig_num)
if clear:
fig.clf()
if new_axes:
ax = fig.add_subplot(111)
else:
ax = fig.gca()
for ii in range(len(freq_range) - 1):
f0, f1 = freq_range[[ii, ii+1]]
gap = gaps[ii]
ranges = gap_ranges[ii]
if isinstance(gap, list):
for ig, (gmin, gmax) in enumerate(gap):
kind, kind_desc = kinds[ii][ig]
plot_gap(ax, ranges[ig], kind, kind_desc, plot_range, plot_rsc)
output(ii, gmin[0], gmax[0], '%.8f' % f0, '%.8f' % f1)
| output(' -> %s\n %s' %(kind_desc, ranges[ig])) | sfepy.base.base.output |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
    """
    Define a displacement depending on the y coordinate.

    Returns ``shift * y**2`` for each point in `coors`; `ts`, `bc` and
    `problem` are part of the boundary-condition function interface and
    are not used.
    """
    y = coors[:, 1]
    return shift * y ** 2
# Help strings for the command-line options defined in main().
helps = {
    'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = | Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh') | sfepy.discrete.fem.Mesh.from_file |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = | FEDomain('domain', mesh) | sfepy.discrete.fem.FEDomain |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = | FieldVariable('u', 'unknown', field) | sfepy.discrete.FieldVariable |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = | FieldVariable('v', 'test', field, primary_var_name='u') | sfepy.discrete.FieldVariable |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = | Material('f', val=[[0.02], [0.01]]) | sfepy.discrete.Material |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = | Integral('i', order=3) | sfepy.discrete.Integral |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = | Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v) | sfepy.terms.Term.new |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
eq = | Equation('balance', t1 + t2) | sfepy.discrete.Equation |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
eq = Equation('balance', t1 + t2)
eqs = | Equations([eq]) | sfepy.discrete.Equations |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', gamma1, {'u.all' : 0.0})
bc_fun = Function('shift_u_fun', shift_u_fun,
extra_args={'shift' : 0.01})
shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = | ScipyDirect({}) | sfepy.solvers.ls.ScipyDirect |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', gamma1, {'u.all' : 0.0})
bc_fun = Function('shift_u_fun', shift_u_fun,
extra_args={'shift' : 0.01})
shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = | IndexedStruct() | sfepy.base.base.IndexedStruct |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
    """
    Define a displacement depending on the y coordinate.

    Returns ``shift * y**2`` for the y column of `coors`.
    """
    y_coors = coors[:, 1]
    return shift * y_coors ** 2
helps = {
'show' : 'show the results figure',
}
def main():
    """
    Set up a linear elasticity problem on a 2D rectangle mesh using the
    sfepy interactive API.

    NOTE(review): this copy of the function is truncated - it builds the
    mesh, field, equations, boundary conditions and solvers, but never
    assembles or solves anything; confirm against the complete example.
    """
    from sfepy import data_dir
    # Command line handling: only --version and -s/--show are exposed.
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # Mesh and domain; eps is a mesh-size-relative tolerance used to pick
    # boundary vertices by their x coordinate.
    mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
    domain = FEDomain('domain', mesh)
    min_x, max_x = domain.get_mesh_bounding_box()[:,0]
    eps = 1e-8 * (max_x - min_x)
    # Regions: the whole domain plus the left (Gamma1) / right (Gamma2) edges.
    omega = domain.create_region('Omega', 'all')
    gamma1 = domain.create_region('Gamma1',
                                  'vertices in x < %.10f' % (min_x + eps),
                                  'facet')
    gamma2 = domain.create_region('Gamma2',
                                  'vertices in x > %.10f' % (max_x - eps),
                                  'facet')
    # Second order vector displacement field, unknown u and test function v.
    field = Field.from_args('fu', nm.float64, 'vector', omega,
                            approx_order=2)
    u = FieldVariable('u', 'unknown', field)
    v = FieldVariable('v', 'test', field, primary_var_name='u')
    # Materials: isotropic elastic stiffness and a constant volume force.
    m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
    f = Material('f', val=[[0.02], [0.01]])
    integral = Integral('i', order=3)
    # Balance of momentum: elastic term plus the volume force term.
    t1 = Term.new('dw_lin_elastic(m.D, v, u)',
                  integral, omega, m=m, v=v, u=u)
    t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])
    # Boundary conditions: clamp the left edge, prescribe a y-dependent
    # shift of the x displacement component on the right edge.
    fix_u = EssentialBC('fix_u', gamma1, {'u.all' : 0.0})
    bc_fun = Function('shift_u_fun', shift_u_fun,
                      extra_args={'shift' : 0.01})
    shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
    # Direct linear solver wrapped by a Newton nonlinear solver.
    ls = ScipyDirect({})
    nls_status = IndexedStruct()
    nls = Newton({}, lin_solver=ls, status=nls_status)
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
    """
    Define a displacement depending on the y coordinate.

    Returns ``shift * y**2`` for the y column of `coors`.
    """
    y_coors = coors[:, 1]
    return shift * y_coors ** 2
helps = {
'show' : 'show the results figure',
}
def main():
    """
    Set up a linear elasticity problem on a 2D rectangle mesh using the
    sfepy interactive API.

    NOTE(review): this copy of the function is truncated - it builds the
    problem object but never solves it; confirm against the complete
    example.
    """
    from sfepy import data_dir
    # Command line handling: only --version and -s/--show are exposed.
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # Mesh and domain; eps is a mesh-size-relative tolerance used to pick
    # boundary vertices by their x coordinate.
    mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
    domain = FEDomain('domain', mesh)
    min_x, max_x = domain.get_mesh_bounding_box()[:,0]
    eps = 1e-8 * (max_x - min_x)
    # Regions: the whole domain plus the left (Gamma1) / right (Gamma2) edges.
    omega = domain.create_region('Omega', 'all')
    gamma1 = domain.create_region('Gamma1',
                                  'vertices in x < %.10f' % (min_x + eps),
                                  'facet')
    gamma2 = domain.create_region('Gamma2',
                                  'vertices in x > %.10f' % (max_x - eps),
                                  'facet')
    # Second order vector displacement field, unknown u and test function v.
    field = Field.from_args('fu', nm.float64, 'vector', omega,
                            approx_order=2)
    u = FieldVariable('u', 'unknown', field)
    v = FieldVariable('v', 'test', field, primary_var_name='u')
    # Materials: isotropic elastic stiffness and a constant volume force.
    m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
    f = Material('f', val=[[0.02], [0.01]])
    integral = Integral('i', order=3)
    # Balance of momentum: elastic term plus the volume force term.
    t1 = Term.new('dw_lin_elastic(m.D, v, u)',
                  integral, omega, m=m, v=v, u=u)
    t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])
    # Boundary conditions: clamp the left edge, prescribe a y-dependent
    # shift of the x displacement component on the right edge.
    fix_u = EssentialBC('fix_u', gamma1, {'u.all' : 0.0})
    bc_fun = Function('shift_u_fun', shift_u_fun,
                      extra_args={'shift' : 0.01})
    shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
    # Direct linear solver wrapped by a Newton nonlinear solver.
    ls = ScipyDirect({})
    nls_status = IndexedStruct()
    nls = Newton({}, lin_solver=ls, status=nls_status)
    pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
    """
    Define a displacement depending on the y coordinate.

    Returns ``shift * y**2`` for the y column of `coors`.
    """
    y_coors = coors[:, 1]
    return shift * y_coors ** 2
helps = {
'show' : 'show the results figure',
}
def main():
    """
    Solve a linear elasticity problem on a 2D rectangle mesh using the
    sfepy interactive API and save the results.

    NOTE(review): this copy of the function appears truncated - a Viewer
    is created in the final branch but never invoked; confirm against the
    complete example script.
    """
    from sfepy import data_dir
    # Command line handling: only --version and -s/--show are exposed.
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # Mesh and domain; eps is a mesh-size-relative tolerance used to pick
    # boundary vertices by their x coordinate.
    mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
    domain = FEDomain('domain', mesh)
    min_x, max_x = domain.get_mesh_bounding_box()[:,0]
    eps = 1e-8 * (max_x - min_x)
    # Regions: the whole domain plus the left (Gamma1) / right (Gamma2) edges.
    omega = domain.create_region('Omega', 'all')
    gamma1 = domain.create_region('Gamma1',
                                  'vertices in x < %.10f' % (min_x + eps),
                                  'facet')
    gamma2 = domain.create_region('Gamma2',
                                  'vertices in x > %.10f' % (max_x - eps),
                                  'facet')
    # Second order vector displacement field, unknown u and test function v.
    field = Field.from_args('fu', nm.float64, 'vector', omega,
                            approx_order=2)
    u = FieldVariable('u', 'unknown', field)
    v = FieldVariable('v', 'test', field, primary_var_name='u')
    # Materials: isotropic elastic stiffness and a constant volume force.
    m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
    f = Material('f', val=[[0.02], [0.01]])
    integral = Integral('i', order=3)
    # Balance of momentum: elastic term plus the volume force term.
    t1 = Term.new('dw_lin_elastic(m.D, v, u)',
                  integral, omega, m=m, v=v, u=u)
    t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])
    # Boundary conditions: clamp the left edge, prescribe a y-dependent
    # shift of the x displacement component on the right edge.
    fix_u = EssentialBC('fix_u', gamma1, {'u.all' : 0.0})
    bc_fun = Function('shift_u_fun', shift_u_fun,
                      extra_args={'shift' : 0.01})
    shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
    # Direct linear solver wrapped by a Newton nonlinear solver.
    ls = ScipyDirect({})
    nls_status = IndexedStruct()
    nls = Newton({}, lin_solver=ls, status=nls_status)
    pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)
    pb.save_regions_as_groups('regions')
    # Apply the essential boundary conditions and solve.
    pb.time_update(ebcs=Conditions([fix_u, shift_u]))
    vec = pb.solve()
    print(nls_status)
    pb.save_state('linear_elasticity.vtk', vec)
    if options.show:
        view = Viewer('linear_elasticity.vtk')
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
    """
    Define a displacement depending on the y coordinate.

    Returns ``shift * y**2`` for the y column of `coors`.
    """
    y_coors = coors[:, 1]
    return shift * y_coors ** 2
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D= | stiffness_from_lame(dim=2, lam=1.0, mu=1.0) | sfepy.mechanics.matcoefs.stiffness_from_lame |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
    """
    Define a displacement depending on the y coordinate.

    Returns ``shift * y**2`` for the y column of `coors`.
    """
    y_coors = coors[:, 1]
    return shift * y_coors ** 2
helps = {
'show' : 'show the results figure',
}
def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
domain = FEDomain('domain', mesh)
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim=2, lam=1.0, mu=1.0))
f = Material('f', val=[[0.02], [0.01]])
integral = Integral('i', order=3)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_volume_lvf(f.val, v)', integral, omega, f=f, v=v)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', gamma1, {'u.all' : 0.0})
bc_fun = Function('shift_u_fun', shift_u_fun,
extra_args={'shift' : 0.01})
shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)
pb.save_regions_as_groups('regions')
pb.time_update(ebcs= | Conditions([fix_u, shift_u]) | sfepy.discrete.conditions.Conditions |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
| output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm)) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = | Timer() | sfepy.base.timing.Timer |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
| output(aux) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of `fn_of` at the point `xit`
    against a central finite difference approximation.

    Per-component values and the infinity norm of the difference are
    printed; all three vectors are saved as text files ('aofg.txt',
    'dofg.txt', 'diff.txt'). Execution is paused at the end.

    Parameters
    ----------
    xit : array
        The point at which to check the gradient.
    aofg : array
        The analytical gradient.
    fn_of : callable
        The objective function.
    delta : float
        The finite difference step.
    check : int
        If 2, also plot both gradients (requires pylab).
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate:
        # (f(x + delta e_ii) - f(x - delta e_ii)) / (2 delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # NOTE(review): nm.Inf was removed in NumPy >= 2.0; nm.inf is the
    # portable spelling.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
| output('**********', ii, aofg[ii], dofg[ii]) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of `fn_of` at the point `xit`
    against a central finite difference approximation.

    Per-component values and the infinity norm of the difference are
    printed; all three vectors are saved as text files ('aofg.txt',
    'dofg.txt', 'diff.txt'). Execution is paused at the end.

    Parameters
    ----------
    xit : array
        The point at which to check the gradient.
    aofg : array
        The analytical gradient.
    fn_of : callable
        The objective function.
    delta : float
        The finite difference step.
    check : int
        If 2, also plot both gradients (requires pylab).
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate:
        # (f(x + delta e_ii) - f(x - delta e_ii)) / (2 delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # NOTE(review): nm.Inf was removed in NumPy >= 2.0; nm.inf is the
    # portable spelling.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
| OptimizationSolver.__init__(self, conf, **kwargs) | sfepy.solvers.solvers.OptimizationSolver.__init__ |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of `fn_of` at the point `xit`
    against a central finite difference approximation.

    Per-component values and the infinity norm of the difference are
    printed; all three vectors are saved as text files ('aofg.txt',
    'dofg.txt', 'diff.txt'). Execution is paused at the end.

    Parameters
    ----------
    xit : array
        The point at which to check the gradient.
    aofg : array
        The analytical gradient.
    fn_of : callable
        The objective function.
    delta : float
        The finite difference step.
    check : int
        If 2, also plot both gradients (requires pylab).
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate:
        # (f(x + delta e_ii) - f(x - delta e_ii)) / (2 delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # NOTE(review): nm.Inf was removed in NumPy >= 2.0; nm.inf is the
    # portable spelling.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = | get_logging_conf(conf) | sfepy.base.log.get_logging_conf |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of `fn_of` at the point `xit`
    against a central finite difference approximation.

    Per-component values and the infinity norm of the difference are
    printed; all three vectors are saved as text files ('aofg.txt',
    'dofg.txt', 'diff.txt'). Execution is paused at the end.

    Parameters
    ----------
    xit : array
        The point at which to check the gradient.
    aofg : array
        The analytical gradient.
    fn_of : callable
        The objective function.
    delta : float
        The finite difference step.
    check : int
        If 2, also plot both gradients (requires pylab).
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate:
        # (f(x + delta e_ii) - f(x - delta e_ii)) / (2 delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # NOTE(review): nm.Inf was removed in NumPy >= 2.0; nm.inf is the
    # portable spelling.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = | Struct(name='log_conf', **log) | sfepy.base.base.Struct |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration `it` for convergence.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Only format the gradient norm when it is given - the original
    # unconditional '%e' % ofg_norm raised TypeError for the default None.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    # The iteration limit applies only when no other criterion fired.
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that its calls are counted and timed.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in place by the wrapper.
    times : list of float
        The elapsed time of each call, appended by the wrapper.
    function_wrapper : callable
        Calls ``function(x, *args)`` and returns its result.
    """
    # Mutable containers so that the closure can update the shared state.
    ncalls = [0]
    times = []
    timer = Timer()
    def function_wrapper(x):
        ncalls[0] += 1
        timer.start()
        out = function(x, *args)
        times.append(timer.stop())
        return out
    return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of `fn_of` at the point `xit`
    against a central finite difference approximation.

    Per-component values and the infinity norm of the difference are
    printed; all three vectors are saved as text files ('aofg.txt',
    'dofg.txt', 'diff.txt'). Execution is paused at the end.

    Parameters
    ----------
    xit : array
        The point at which to check the gradient.
    aofg : array
        The analytical gradient.
    fn_of : callable
        The objective function.
    delta : float
        The finite difference step.
    check : int
        If 2, also plot both gradients (requires pylab).
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate:
        # (f(x + delta e_ii) - f(x - delta e_ii)) / (2 delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # NOTE(review): nm.Inf was removed in NumPy >= 2.0; nm.inf is the
    # portable spelling.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = | get_default(conf, self.conf) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = | get_default(obj_fun, self.obj_fun) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = | get_default(obj_fun_grad, self.obj_fun_grad) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = | get_default(status, self.status) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = | get_default(obj_args, self.obj_args) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
| output('entering optimization loop...') | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
"""
Returns
-------
flag : int
* -1 ... continue
* 0 ... small OF -> stop
* 1 ... i_max reached -> stop
* 2 ... small OFG -> stop
* 3 ... small relative decrase of OF
"""
status = -1
output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
if (abs(of) < conf.eps_of):
status = 0
elif ofg_norm and (ofg_norm < conf.eps_ofg):
status = 2
elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
status = 3
if (status == -1) and (it >= conf.i_max):
status = 1
return status
def wrap_function(function, args):
ncalls = [0]
times = []
timer = Timer()
def function_wrapper(x):
ncalls[0] += 1
timer.start()
out = function(x, *args)
times.append(timer.stop())
return out
return ncalls, times, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
dofg = nm.zeros_like(aofg)
xd = xit.copy()
for ii in range(xit.shape[0]):
xd[ii] = xit[ii] + delta
ofp = fn_of(xd)
xd[ii] = xit[ii] - delta
ofm = fn_of(xd)
xd[ii] = xit[ii]
dofg[ii] = 0.5 * (ofp - ofm) / delta
output('**********', ii, aofg[ii], dofg[ii])
diff = abs(aofg - dofg)
aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
diff[:,nm.newaxis]), 1)
output(aux)
output(nla.norm(diff, nm.Inf))
aofg.tofile('aofg.txt', ' ')
dofg.tofile('dofg.txt', ' ')
diff.tofile('diff.txt', ' ')
if check == 2:
import pylab
pylab.plot(aofg)
pylab.plot(dofg)
pylab.legend(('analytical', 'finite difference'))
pylab.show()
pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = | Timer() | sfepy.base.timing.Timer |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Test the convergence of an optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing `eps_of`, `eps_ofg`, `eps_rd`
        and `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # '%e' % None raises TypeError - format the norm separately so that
    # the report also works when no gradient norm is available.
    norm_msg = 'None' if ofg_norm is None else '%e' % ofg_norm
    output('opt: iter: %d, of: %e (||ofg||: %s)' % (it, of, norm_msg))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the evaluation times
    are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter (a list so that the closure can update it).
    times : list of float
        The duration of each call.
    function_wrapper : callable
        The wrapper evaluating ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    stopwatch = Timer()
    def function_wrapper(x):
        call_count[0] += 1
        stopwatch.start()
        result = function(x, *args)
        durations.append(stopwatch.stop())
        return result
    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of the objective function at
    `xit` against a central finite-difference approximation.

    Both gradients and their absolute difference are printed and saved to
    the files 'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` == 2, the
    gradients are also plotted using pylab. Pauses when done.

    Parameters
    ----------
    xit : array
        The point at which the gradient is checked.
    aofg : array
        The analytically computed gradient.
    fn_of : callable
        The objective function of a single array argument.
    delta : float
        The finite difference step.
    check : int
        If 2, plot the two gradients.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference: (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
    def __init__(self, conf, **kwargs):
        """
        Initialize the solver and set up the optional convergence log.

        Parameters
        ----------
        conf : Struct
            The solver configuration. Its `log` item is normalized into a
            Struct with `text` and `plot` items, and `is_any_log` is set
            to indicate whether any logging is requested.
        **kwargs : dict
            Additional arguments passed to `OptimizationSolver.__init__()`.
        """
        OptimizationSolver.__init__(self, conf, **kwargs)
        conf = self.conf
        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)
        if conf.is_any_log:
            # Four subplots: ||Psi||, ||grad Psi||, the step length alpha
            # and the iteration number, logged over all iterations.
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations', 'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
        else:
            self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
output('%10s: %7.2f [s]' % (key, val[-1]))
it = it + 1
| output('status: %d' % ret) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Test the convergence of an optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing `eps_of`, `eps_ofg`, `eps_rd`
        and `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # '%e' % None raises TypeError - format the norm separately so that
    # the report also works when no gradient norm is available.
    norm_msg = 'None' if ofg_norm is None else '%e' % ofg_norm
    output('opt: iter: %d, of: %e (||ofg||: %s)' % (it, of, norm_msg))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the evaluation times
    are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter (a list so that the closure can update it).
    times : list of float
        The duration of each call.
    function_wrapper : callable
        The wrapper evaluating ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    stopwatch = Timer()
    def function_wrapper(x):
        call_count[0] += 1
        stopwatch.start()
        result = function(x, *args)
        durations.append(stopwatch.stop())
        return result
    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of the objective function at
    `xit` against a central finite-difference approximation.

    Both gradients and their absolute difference are printed and saved to
    the files 'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` == 2, the
    gradients are also plotted using pylab. Pauses when done.

    Parameters
    ----------
    xit : array
        The point at which the gradient is checked.
    aofg : array
        The analytically computed gradient.
    fn_of : callable
        The objective function of a single array argument.
    delta : float
        The finite difference step.
    check : int
        If 2, plot the two gradients.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference: (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
    def __init__(self, conf, **kwargs):
        """
        Initialize the solver and set up the optional convergence log.

        Parameters
        ----------
        conf : Struct
            The solver configuration. Its `log` item is normalized into a
            Struct with `text` and `plot` items, and `is_any_log` is set
            to indicate whether any logging is requested.
        **kwargs : dict
            Additional arguments passed to `OptimizationSolver.__init__()`.
        """
        OptimizationSolver.__init__(self, conf, **kwargs)
        conf = self.conf
        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)
        if conf.is_any_log:
            # Four subplots: ||Psi||, ||grad Psi||, the step length alpha
            # and the iteration number, logged over all iterations.
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations', 'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
        else:
            self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
output('%10s: %7.2f [s]' % (key, val[-1]))
it = it + 1
output('status: %d' % ret)
| output('initial value: %.8e' % of0) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Test the convergence of an optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing `eps_of`, `eps_ofg`, `eps_rd`
        and `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # '%e' % None raises TypeError - format the norm separately so that
    # the report also works when no gradient norm is available.
    norm_msg = 'None' if ofg_norm is None else '%e' % ofg_norm
    output('opt: iter: %d, of: %e (||ofg||: %s)' % (it, of, norm_msg))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the evaluation times
    are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter (a list so that the closure can update it).
    times : list of float
        The duration of each call.
    function_wrapper : callable
        The wrapper evaluating ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    stopwatch = Timer()
    def function_wrapper(x):
        call_count[0] += 1
        stopwatch.start()
        result = function(x, *args)
        durations.append(stopwatch.stop())
        return result
    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of the objective function at
    `xit` against a central finite-difference approximation.

    Both gradients and their absolute difference are printed and saved to
    the files 'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` == 2, the
    gradients are also plotted using pylab. Pauses when done.

    Parameters
    ----------
    xit : array
        The point at which the gradient is checked.
    aofg : array
        The analytically computed gradient.
    fn_of : callable
        The objective function of a single array argument.
    delta : float
        The finite difference step.
    check : int
        If 2, plot the two gradients.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference: (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
    def __init__(self, conf, **kwargs):
        """
        Initialize the solver and set up the optional convergence log.

        Parameters
        ----------
        conf : Struct
            The solver configuration. Its `log` item is normalized into a
            Struct with `text` and `plot` items, and `is_any_log` is set
            to indicate whether any logging is requested.
        **kwargs : dict
            Additional arguments passed to `OptimizationSolver.__init__()`.
        """
        OptimizationSolver.__init__(self, conf, **kwargs)
        conf = self.conf
        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)
        if conf.is_any_log:
            # Four subplots: ||Psi||, ||grad Psi||, the step length alpha
            # and the iteration number, logged over all iterations.
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations', 'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
        else:
            self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
output('%10s: %7.2f [s]' % (key, val[-1]))
it = it + 1
output('status: %d' % ret)
output('initial value: %.8e' % of0)
| output('current value: %.8e' % of) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Test the convergence of an optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing `eps_of`, `eps_ofg`, `eps_rd`
        and `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # '%e' % None raises TypeError - format the norm separately so that
    # the report also works when no gradient norm is available.
    norm_msg = 'None' if ofg_norm is None else '%e' % ofg_norm
    output('opt: iter: %d, of: %e (||ofg||: %s)' % (it, of, norm_msg))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the evaluation times
    are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter (a list so that the closure can update it).
    times : list of float
        The duration of each call.
    function_wrapper : callable
        The wrapper evaluating ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    stopwatch = Timer()
    def function_wrapper(x):
        call_count[0] += 1
        stopwatch.start()
        result = function(x, *args)
        durations.append(stopwatch.stop())
        return result
    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of the objective function at
    `xit` against a central finite-difference approximation.

    Both gradients and their absolute difference are printed and saved to
    the files 'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` == 2, the
    gradients are also plotted using pylab. Pauses when done.

    Parameters
    ----------
    xit : array
        The point at which the gradient is checked.
    aofg : array
        The analytically computed gradient.
    fn_of : callable
        The objective function of a single array argument.
    delta : float
        The finite difference step.
    check : int
        If 2, plot the two gradients.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference: (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
    def __init__(self, conf, **kwargs):
        """
        Initialize the solver and set up the optional convergence log.

        Parameters
        ----------
        conf : Struct
            The solver configuration. Its `log` item is normalized into a
            Struct with `text` and `plot` items, and `is_any_log` is set
            to indicate whether any logging is requested.
        **kwargs : dict
            Additional arguments passed to `OptimizationSolver.__init__()`.
        """
        OptimizationSolver.__init__(self, conf, **kwargs)
        conf = self.conf
        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)
        if conf.is_any_log:
            # Four subplots: ||Psi||, ||grad Psi||, the step length alpha
            # and the iteration number, logged over all iterations.
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations', 'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
        else:
            self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
output('%10s: %7.2f [s]' % (key, val[-1]))
it = it + 1
output('status: %d' % ret)
output('initial value: %.8e' % of0)
output('current value: %.8e' % of)
| output('iterations: %d' % it) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Test the convergence of an optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing `eps_of`, `eps_ofg`, `eps_rd`
        and `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # '%e' % None raises TypeError - format the norm separately so that
    # the report also works when no gradient norm is available.
    norm_msg = 'None' if ofg_norm is None else '%e' % ofg_norm
    output('opt: iter: %d, of: %e (||ofg||: %s)' % (it, of, norm_msg))
    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3
    if (status == -1) and (it >= conf.i_max):
        status = 1
    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the evaluation times
    are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter (a list so that the closure can update it).
    times : list of float
        The duration of each call.
    function_wrapper : callable
        The wrapper evaluating ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    stopwatch = Timer()
    def function_wrapper(x):
        call_count[0] += 1
        stopwatch.start()
        result = function(x, *args)
        durations.append(stopwatch.stop())
        return result
    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytical gradient `aofg` of the objective function at
    `xit` against a central finite-difference approximation.

    Both gradients and their absolute difference are printed and saved to
    the files 'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` == 2, the
    gradients are also plotted using pylab. Pauses when done.

    Parameters
    ----------
    xit : array
        The point at which the gradient is checked.
    aofg : array
        The analytically computed gradient.
    fn_of : callable
        The objective function of a single array argument.
    delta : float
        The finite difference step.
    check : int
        If 2, plot the two gradients.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference: (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta).
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver with optional backtracking or
    full (Wolfe) line-search.
    """
    name = 'opt.fmin_sd'

    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]

    def __init__(self, conf, **kwargs):
        """Create the solver and set up the optional convergence log."""
        OptimizationSolver.__init__(self, conf, **kwargs)

        conf = self.conf

        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)

        if conf.is_any_log:
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations',
                                    'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])

        else:
            self.log = None

    def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
                 status=None, obj_args=None):
        """
        Minimize the objective function, starting from `x0`.

        Parameters
        ----------
        x0 : array
            The initial guess.
        conf : Struct, optional
            The solver configuration; `self.conf` is used if not given.
        obj_fun : callable, optional
            The objective function.
        obj_fun_grad : callable, optional
            The gradient of the objective function.
        status : dict-like, optional
            If given, it is filled with convergence and timing information.
        obj_args : tuple, optional
            Extra arguments passed to `obj_fun` and `obj_fun_grad`.

        Returns
        -------
        xit : array
            The solution found.
        """
        conf = get_default(conf, self.conf)
        obj_fun = get_default(obj_fun, self.obj_fun)
        obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
        status = get_default(status, self.status)
        obj_args = get_default(obj_args, self.obj_args)

        if conf.output:
            # Redirect the module-level output() to the user function.
            globals()['output'] = conf.output

        output('entering optimization loop...')

        # Wrapped functions count the calls and record per-call times.
        nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
        nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)

        timer = Timer()
        time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}

        ofg = None

        it = 0
        xit = x0.copy()
        # Initialize alpha up-front, so that the final self.log() call does
        # not fail with a NameError when convergence occurs in the very
        # first iteration, before the line-search has set alpha.
        alpha = conf.ls0
        while 1:
            of = fn_of(xit)

            if it == 0:
                of0 = ofit0 = of_prev = of
                of_prev_prev = of + 5000.0

            if ofg is None:
                ofg = fn_ofg(xit)

            if conf.check:
                timer.start()
                check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
                time_stats['check'].append(timer.stop())

            ofg_norm = nla.norm(ofg, conf.norm)

            ret = conv_test(conf, it, of, ofit0, ofg_norm)
            if ret >= 0:
                break
            ofit0 = of

            ##
            # Backtrack (on errors).
            alpha = conf.ls0
            can_ls = True
            while 1:
                xit2 = xit - alpha * ofg
                aux = fn_of(xit2)

                if self.log is not None:
                    self.log(of, ofg_norm, alpha, it)

                if aux is None:
                    # Residual assembling failed ("warp violation") ->
                    # reduce the step more aggressively and disable the
                    # full line-search for this iteration.
                    alpha *= conf.ls_red_warp
                    can_ls = False
                    output('warp: reducing step (%f)' % alpha)

                elif conf.ls and conf.ls_method == 'backtracking':
                    if aux < of * conf.ls_on: break
                    alpha *= conf.ls_red
                    output('backtracking: reducing step (%f)' % alpha)

                else:
                    of_prev_prev = of_prev
                    of_prev = aux
                    break

                if alpha < conf.ls_min:
                    if aux is None:
                        raise RuntimeError('giving up...')
                    output('linesearch failed, continuing anyway')
                    break

            # These values are modified by the line search, even if it fails
            of_prev_bak = of_prev
            of_prev_prev_bak = of_prev_prev

            if conf.ls and can_ls and conf.ls_method == 'full':
                output('full linesearch...')
                alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                       linesearch.line_search(fn_of,fn_ofg,xit,
                                              -ofg,ofg,of_prev,of_prev_prev,
                                              c2=0.4)
                if alpha is None:  # line search failed -- use different one.
                    alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                           sopt.line_search(fn_of,fn_ofg,xit,
                                            -ofg,ofg,of_prev_bak,
                                            of_prev_prev_bak)
                    if alpha is None or alpha == 0:
                        # This line search also failed to find a better
                        # solution.
                        ret = 3
                        break
                output(' -> alpha: %.8e' % alpha)
            else:
                if conf.ls_method == 'full':
                    output('full linesearch off (%s and %s)'
                           % (conf.ls, can_ls))
                ofg1 = None

            if self.log is not None:
                self.log.plot_vlines(color='g', linewidth=0.5)

            xit = xit - alpha * ofg
            if ofg1 is None:
                ofg = None
            else:
                ofg = ofg1.copy()

            for key, val in six.iteritems(time_stats):
                if len(val):
                    output('%10s: %7.2f [s]' % (key, val[-1]))

            it = it + 1

        output('status: %d' % ret)
        output('initial value: %.8e' % of0)
        output('current value: %.8e' % of)
        output('iterations: %d' % it)
        output('function evaluations: %d in %.2f [s]'
               % (nc_of[0], nm.sum(time_stats['of'])))
        output('gradient evaluations: %d in %.2f [s]'
               % (nc_ofg[0], nm.sum(time_stats['ofg'])))

        if self.log is not None:
            self.log(of, ofg_norm, alpha, it)

            if conf.log.plot is not None:
                self.log(save_figure=conf.log.plot, finished=True)
            else:
                self.log(finished=True)

        if status is not None:
            status['log'] = self.log
            # Store the convergence flag - the original self-assignment
            # (status['status'] = status) was a bug.
            status['status'] = ret
            status['of0'] = of0
            status['of'] = of
            status['it'] = it
            status['nc_of'] = nc_of[0]
            status['nc_ofg'] = nc_ofg[0]
            status['time_stats'] = time_stats

        return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
| OptimizationSolver.__init__(self, conf, **kwargs) | sfepy.solvers.solvers.OptimizationSolver.__init__ |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration convergence.

    Parameters
    ----------
    conf : Struct
        The solver configuration (uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`).
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous (reference) objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Do not interpolate None into the '%e' format - that raises TypeError.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the elapsed time of
    every call are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in-place by the wrapper.
    times : list of float
        The elapsed time of each call.
    function_wrapper : callable
        The wrapped function; calls ``function(x, *args)``.
    """
    call_count = [0]
    elapsed = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        elapsed.append(timer.stop())
        return result

    return call_count, elapsed, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytically computed gradient `aofg` of `fn_of` at `xit`
    against a central finite difference approximation with step `delta`.

    Writes the analytical gradient, the numerical gradient and their
    absolute difference to 'aofg.txt', 'dofg.txt' and 'diff.txt'. If
    `check` == 2, also plots both gradients. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate; xd is restored after.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)

        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)

        xd[ii] = xit[ii]

        dofg[ii] = 0.5 * (ofp - ofm) / delta

        output('**********', ii, aofg[ii], dofg[ii])

    diff = abs(aofg - dofg)

    # Columns: analytical, finite difference, absolute difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)

    output(nla.norm(diff, nm.Inf))

    # Save the gradients as space-separated text for later inspection.
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver with optional backtracking or
    full (Wolfe) line-search.
    """
    name = 'opt.fmin_sd'

    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]

    def __init__(self, conf, **kwargs):
        """Create the solver and set up the optional convergence log."""
        OptimizationSolver.__init__(self, conf, **kwargs)

        conf = self.conf

        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)

        if conf.is_any_log:
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations',
                                    'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])

        else:
            self.log = None

    def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
                 status=None, obj_args=None):
        """
        Minimize the objective function, starting from `x0`.

        Parameters
        ----------
        x0 : array
            The initial guess.
        conf : Struct, optional
            The solver configuration; `self.conf` is used if not given.
        obj_fun : callable, optional
            The objective function.
        obj_fun_grad : callable, optional
            The gradient of the objective function.
        status : dict-like, optional
            If given, it is filled with convergence and timing information.
        obj_args : tuple, optional
            Extra arguments passed to `obj_fun` and `obj_fun_grad`.

        Returns
        -------
        xit : array
            The solution found.
        """
        conf = get_default(conf, self.conf)
        obj_fun = get_default(obj_fun, self.obj_fun)
        obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
        status = get_default(status, self.status)
        obj_args = get_default(obj_args, self.obj_args)

        if conf.output:
            # Redirect the module-level output() to the user function.
            globals()['output'] = conf.output

        output('entering optimization loop...')

        # Wrapped functions count the calls and record per-call times.
        nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
        nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)

        timer = Timer()
        time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}

        ofg = None

        it = 0
        xit = x0.copy()
        # Initialize alpha up-front, so that the final self.log() call does
        # not fail with a NameError when convergence occurs in the very
        # first iteration, before the line-search has set alpha.
        alpha = conf.ls0
        while 1:
            of = fn_of(xit)

            if it == 0:
                of0 = ofit0 = of_prev = of
                of_prev_prev = of + 5000.0

            if ofg is None:
                ofg = fn_ofg(xit)

            if conf.check:
                timer.start()
                check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
                time_stats['check'].append(timer.stop())

            ofg_norm = nla.norm(ofg, conf.norm)

            ret = conv_test(conf, it, of, ofit0, ofg_norm)
            if ret >= 0:
                break
            ofit0 = of

            ##
            # Backtrack (on errors).
            alpha = conf.ls0
            can_ls = True
            while 1:
                xit2 = xit - alpha * ofg
                aux = fn_of(xit2)

                if self.log is not None:
                    self.log(of, ofg_norm, alpha, it)

                if aux is None:
                    # Residual assembling failed ("warp violation") ->
                    # reduce the step more aggressively and disable the
                    # full line-search for this iteration.
                    alpha *= conf.ls_red_warp
                    can_ls = False
                    output('warp: reducing step (%f)' % alpha)

                elif conf.ls and conf.ls_method == 'backtracking':
                    if aux < of * conf.ls_on: break
                    alpha *= conf.ls_red
                    output('backtracking: reducing step (%f)' % alpha)

                else:
                    of_prev_prev = of_prev
                    of_prev = aux
                    break

                if alpha < conf.ls_min:
                    if aux is None:
                        raise RuntimeError('giving up...')
                    output('linesearch failed, continuing anyway')
                    break

            # These values are modified by the line search, even if it fails
            of_prev_bak = of_prev
            of_prev_prev_bak = of_prev_prev

            if conf.ls and can_ls and conf.ls_method == 'full':
                output('full linesearch...')
                alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                       linesearch.line_search(fn_of,fn_ofg,xit,
                                              -ofg,ofg,of_prev,of_prev_prev,
                                              c2=0.4)
                if alpha is None:  # line search failed -- use different one.
                    alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                           sopt.line_search(fn_of,fn_ofg,xit,
                                            -ofg,ofg,of_prev_bak,
                                            of_prev_prev_bak)
                    if alpha is None or alpha == 0:
                        # This line search also failed to find a better
                        # solution.
                        ret = 3
                        break
                output(' -> alpha: %.8e' % alpha)
            else:
                if conf.ls_method == 'full':
                    output('full linesearch off (%s and %s)'
                           % (conf.ls, can_ls))
                ofg1 = None

            if self.log is not None:
                self.log.plot_vlines(color='g', linewidth=0.5)

            xit = xit - alpha * ofg
            if ofg1 is None:
                ofg = None
            else:
                ofg = ofg1.copy()

            for key, val in six.iteritems(time_stats):
                if len(val):
                    output('%10s: %7.2f [s]' % (key, val[-1]))

            it = it + 1

        output('status: %d' % ret)
        output('initial value: %.8e' % of0)
        output('current value: %.8e' % of)
        output('iterations: %d' % it)
        output('function evaluations: %d in %.2f [s]'
               % (nc_of[0], nm.sum(time_stats['of'])))
        output('gradient evaluations: %d in %.2f [s]'
               % (nc_ofg[0], nm.sum(time_stats['ofg'])))

        if self.log is not None:
            self.log(of, ofg_norm, alpha, it)

            if conf.log.plot is not None:
                self.log(save_figure=conf.log.plot, finished=True)
            else:
                self.log(finished=True)

        if status is not None:
            status['log'] = self.log
            # Store the convergence flag - the original self-assignment
            # (status['status'] = status) was a bug.
            status['status'] = ret
            status['of0'] = of0
            status['of'] = of
            status['it'] = it
            status['nc_of'] = nc_of[0]
            status['nc_ofg'] = nc_ofg[0]
            status['time_stats'] = time_stats

        return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = | get_default(obj_fun, self.obj_fun) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration convergence.

    Parameters
    ----------
    conf : Struct
        The solver configuration (uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`).
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous (reference) objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Do not interpolate None into the '%e' format - that raises TypeError.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the elapsed time of
    every call are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in-place by the wrapper.
    times : list of float
        The elapsed time of each call.
    function_wrapper : callable
        The wrapped function; calls ``function(x, *args)``.
    """
    call_count = [0]
    elapsed = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        elapsed.append(timer.stop())
        return result

    return call_count, elapsed, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytically computed gradient `aofg` of `fn_of` at `xit`
    against a central finite difference approximation with step `delta`.

    Writes the analytical gradient, the numerical gradient and their
    absolute difference to 'aofg.txt', 'dofg.txt' and 'diff.txt'. If
    `check` == 2, also plots both gradients. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate; xd is restored after.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)

        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)

        xd[ii] = xit[ii]

        dofg[ii] = 0.5 * (ofp - ofm) / delta

        output('**********', ii, aofg[ii], dofg[ii])

    diff = abs(aofg - dofg)

    # Columns: analytical, finite difference, absolute difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)

    output(nla.norm(diff, nm.Inf))

    # Save the gradients as space-separated text for later inspection.
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver with optional backtracking or
    full (Wolfe) line-search.
    """
    name = 'opt.fmin_sd'

    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]

    def __init__(self, conf, **kwargs):
        """Create the solver and set up the optional convergence log."""
        OptimizationSolver.__init__(self, conf, **kwargs)

        conf = self.conf

        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)

        if conf.is_any_log:
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations',
                                    'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])

        else:
            self.log = None

    def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
                 status=None, obj_args=None):
        """
        Minimize the objective function, starting from `x0`.

        Parameters
        ----------
        x0 : array
            The initial guess.
        conf : Struct, optional
            The solver configuration; `self.conf` is used if not given.
        obj_fun : callable, optional
            The objective function.
        obj_fun_grad : callable, optional
            The gradient of the objective function.
        status : dict-like, optional
            If given, it is filled with convergence and timing information.
        obj_args : tuple, optional
            Extra arguments passed to `obj_fun` and `obj_fun_grad`.

        Returns
        -------
        xit : array
            The solution found.
        """
        conf = get_default(conf, self.conf)
        obj_fun = get_default(obj_fun, self.obj_fun)
        obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
        status = get_default(status, self.status)
        obj_args = get_default(obj_args, self.obj_args)

        if conf.output:
            # Redirect the module-level output() to the user function.
            globals()['output'] = conf.output

        output('entering optimization loop...')

        # Wrapped functions count the calls and record per-call times.
        nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
        nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)

        timer = Timer()
        time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}

        ofg = None

        it = 0
        xit = x0.copy()
        # Initialize alpha up-front, so that the final self.log() call does
        # not fail with a NameError when convergence occurs in the very
        # first iteration, before the line-search has set alpha.
        alpha = conf.ls0
        while 1:
            of = fn_of(xit)

            if it == 0:
                of0 = ofit0 = of_prev = of
                of_prev_prev = of + 5000.0

            if ofg is None:
                ofg = fn_ofg(xit)

            if conf.check:
                timer.start()
                check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
                time_stats['check'].append(timer.stop())

            ofg_norm = nla.norm(ofg, conf.norm)

            ret = conv_test(conf, it, of, ofit0, ofg_norm)
            if ret >= 0:
                break
            ofit0 = of

            ##
            # Backtrack (on errors).
            alpha = conf.ls0
            can_ls = True
            while 1:
                xit2 = xit - alpha * ofg
                aux = fn_of(xit2)

                if self.log is not None:
                    self.log(of, ofg_norm, alpha, it)

                if aux is None:
                    # Residual assembling failed ("warp violation") ->
                    # reduce the step more aggressively and disable the
                    # full line-search for this iteration.
                    alpha *= conf.ls_red_warp
                    can_ls = False
                    output('warp: reducing step (%f)' % alpha)

                elif conf.ls and conf.ls_method == 'backtracking':
                    if aux < of * conf.ls_on: break
                    alpha *= conf.ls_red
                    output('backtracking: reducing step (%f)' % alpha)

                else:
                    of_prev_prev = of_prev
                    of_prev = aux
                    break

                if alpha < conf.ls_min:
                    if aux is None:
                        raise RuntimeError('giving up...')
                    output('linesearch failed, continuing anyway')
                    break

            # These values are modified by the line search, even if it fails
            of_prev_bak = of_prev
            of_prev_prev_bak = of_prev_prev

            if conf.ls and can_ls and conf.ls_method == 'full':
                output('full linesearch...')
                alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                       linesearch.line_search(fn_of,fn_ofg,xit,
                                              -ofg,ofg,of_prev,of_prev_prev,
                                              c2=0.4)
                if alpha is None:  # line search failed -- use different one.
                    alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                           sopt.line_search(fn_of,fn_ofg,xit,
                                            -ofg,ofg,of_prev_bak,
                                            of_prev_prev_bak)
                    if alpha is None or alpha == 0:
                        # This line search also failed to find a better
                        # solution.
                        ret = 3
                        break
                output(' -> alpha: %.8e' % alpha)
            else:
                if conf.ls_method == 'full':
                    output('full linesearch off (%s and %s)'
                           % (conf.ls, can_ls))
                ofg1 = None

            if self.log is not None:
                self.log.plot_vlines(color='g', linewidth=0.5)

            xit = xit - alpha * ofg
            if ofg1 is None:
                ofg = None
            else:
                ofg = ofg1.copy()

            for key, val in six.iteritems(time_stats):
                if len(val):
                    output('%10s: %7.2f [s]' % (key, val[-1]))

            it = it + 1

        output('status: %d' % ret)
        output('initial value: %.8e' % of0)
        output('current value: %.8e' % of)
        output('iterations: %d' % it)
        output('function evaluations: %d in %.2f [s]'
               % (nc_of[0], nm.sum(time_stats['of'])))
        output('gradient evaluations: %d in %.2f [s]'
               % (nc_ofg[0], nm.sum(time_stats['ofg'])))

        if self.log is not None:
            self.log(of, ofg_norm, alpha, it)

            if conf.log.plot is not None:
                self.log(save_figure=conf.log.plot, finished=True)
            else:
                self.log(finished=True)

        if status is not None:
            status['log'] = self.log
            # Store the convergence flag - the original self-assignment
            # (status['status'] = status) was a bug.
            status['status'] = ret
            status['of0'] = of0
            status['of'] = of
            status['it'] = it
            status['nc_of'] = nc_of[0]
            status['nc_ofg'] = nc_ofg[0]
            status['time_stats'] = time_stats

        return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = | get_default(obj_fun_grad, self.obj_fun_grad) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the optimization iteration convergence.

    Parameters
    ----------
    conf : Struct
        The solver configuration (uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`).
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous (reference) objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    # Do not interpolate None into the '%e' format - that raises TypeError.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of calls and the elapsed time of
    every call are recorded.

    Returns
    -------
    ncalls : list of one int
        The call counter, updated in-place by the wrapper.
    times : list of float
        The elapsed time of each call.
    function_wrapper : callable
        The wrapped function; calls ``function(x, *args)``.
    """
    call_count = [0]
    elapsed = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        elapsed.append(timer.stop())
        return result

    return call_count, elapsed, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Check the analytically computed gradient `aofg` of `fn_of` at `xit`
    against a central finite difference approximation with step `delta`.

    Writes the analytical gradient, the numerical gradient and their
    absolute difference to 'aofg.txt', 'dofg.txt' and 'diff.txt'. If
    `check` == 2, also plots both gradients. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference in the ii-th coordinate; xd is restored after.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)

        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)

        xd[ii] = xit[ii]

        dofg[ii] = 0.5 * (ofp - ofm) / delta

        output('**********', ii, aofg[ii], dofg[ii])

    diff = abs(aofg - dofg)

    # Columns: analytical, finite difference, absolute difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)

    output(nla.norm(diff, nm.Inf))

    # Save the gradients as space-separated text for later inspection.
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver with optional backtracking or
    full (Wolfe) line-search.
    """
    name = 'opt.fmin_sd'

    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]

    def __init__(self, conf, **kwargs):
        """Create the solver and set up the optional convergence log."""
        OptimizationSolver.__init__(self, conf, **kwargs)

        conf = self.conf

        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)

        if conf.is_any_log:
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations',
                                    'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])

        else:
            self.log = None

    def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
                 status=None, obj_args=None):
        """
        Minimize the objective function, starting from `x0`.

        Parameters
        ----------
        x0 : array
            The initial guess.
        conf : Struct, optional
            The solver configuration; `self.conf` is used if not given.
        obj_fun : callable, optional
            The objective function.
        obj_fun_grad : callable, optional
            The gradient of the objective function.
        status : dict-like, optional
            If given, it is filled with convergence and timing information.
        obj_args : tuple, optional
            Extra arguments passed to `obj_fun` and `obj_fun_grad`.

        Returns
        -------
        xit : array
            The solution found.
        """
        conf = get_default(conf, self.conf)
        obj_fun = get_default(obj_fun, self.obj_fun)
        obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
        status = get_default(status, self.status)
        obj_args = get_default(obj_args, self.obj_args)

        if conf.output:
            # Redirect the module-level output() to the user function.
            globals()['output'] = conf.output

        output('entering optimization loop...')

        # Wrapped functions count the calls and record per-call times.
        nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
        nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)

        timer = Timer()
        time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}

        ofg = None

        it = 0
        xit = x0.copy()
        # Initialize alpha up-front, so that the final self.log() call does
        # not fail with a NameError when convergence occurs in the very
        # first iteration, before the line-search has set alpha.
        alpha = conf.ls0
        while 1:
            of = fn_of(xit)

            if it == 0:
                of0 = ofit0 = of_prev = of
                of_prev_prev = of + 5000.0

            if ofg is None:
                ofg = fn_ofg(xit)

            if conf.check:
                timer.start()
                check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
                time_stats['check'].append(timer.stop())

            ofg_norm = nla.norm(ofg, conf.norm)

            ret = conv_test(conf, it, of, ofit0, ofg_norm)
            if ret >= 0:
                break
            ofit0 = of

            ##
            # Backtrack (on errors).
            alpha = conf.ls0
            can_ls = True
            while 1:
                xit2 = xit - alpha * ofg
                aux = fn_of(xit2)

                if self.log is not None:
                    self.log(of, ofg_norm, alpha, it)

                if aux is None:
                    # Residual assembling failed ("warp violation") ->
                    # reduce the step more aggressively and disable the
                    # full line-search for this iteration.
                    alpha *= conf.ls_red_warp
                    can_ls = False
                    output('warp: reducing step (%f)' % alpha)

                elif conf.ls and conf.ls_method == 'backtracking':
                    if aux < of * conf.ls_on: break
                    alpha *= conf.ls_red
                    output('backtracking: reducing step (%f)' % alpha)

                else:
                    of_prev_prev = of_prev
                    of_prev = aux
                    break

                if alpha < conf.ls_min:
                    if aux is None:
                        raise RuntimeError('giving up...')
                    output('linesearch failed, continuing anyway')
                    break

            # These values are modified by the line search, even if it fails
            of_prev_bak = of_prev
            of_prev_prev_bak = of_prev_prev

            if conf.ls and can_ls and conf.ls_method == 'full':
                output('full linesearch...')
                alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                       linesearch.line_search(fn_of,fn_ofg,xit,
                                              -ofg,ofg,of_prev,of_prev_prev,
                                              c2=0.4)
                if alpha is None:  # line search failed -- use different one.
                    alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                           sopt.line_search(fn_of,fn_ofg,xit,
                                            -ofg,ofg,of_prev_bak,
                                            of_prev_prev_bak)
                    if alpha is None or alpha == 0:
                        # This line search also failed to find a better
                        # solution.
                        ret = 3
                        break
                output(' -> alpha: %.8e' % alpha)
            else:
                if conf.ls_method == 'full':
                    output('full linesearch off (%s and %s)'
                           % (conf.ls, can_ls))
                ofg1 = None

            if self.log is not None:
                self.log.plot_vlines(color='g', linewidth=0.5)

            xit = xit - alpha * ofg
            if ofg1 is None:
                ofg = None
            else:
                ofg = ofg1.copy()

            for key, val in six.iteritems(time_stats):
                if len(val):
                    output('%10s: %7.2f [s]' % (key, val[-1]))

            it = it + 1

        output('status: %d' % ret)
        output('initial value: %.8e' % of0)
        output('current value: %.8e' % of)
        output('iterations: %d' % it)
        output('function evaluations: %d in %.2f [s]'
               % (nc_of[0], nm.sum(time_stats['of'])))
        output('gradient evaluations: %d in %.2f [s]'
               % (nc_ofg[0], nm.sum(time_stats['ofg'])))

        if self.log is not None:
            self.log(of, ofg_norm, alpha, it)

            if conf.log.plot is not None:
                self.log(save_figure=conf.log.plot, finished=True)
            else:
                self.log(finished=True)

        if status is not None:
            status['log'] = self.log
            # Store the convergence flag - the original self-assignment
            # (status['status'] = status) was a bug.
            status['status'] = ret
            status['of0'] = of0
            status['of'] = of
            status['it'] = it
            status['nc_of'] = nc_of[0]
            status['nc_ofg'] = nc_ofg[0]
            status['time_stats'] = time_stats

        return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = | get_default(status, self.status) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization iteration.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing ``eps_of``, ``eps_ofg``,
        ``eps_rd`` and ``i_max``.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if known.

    Returns
    -------
    status : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    if ofg_norm is None:
        # %e cannot format None - report without the gradient norm.
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its calls and the duration of
    each call are recorded.

    Returns
    -------
    call_count : list of one int
        The call counter, updated in place by the wrapper.
    durations : list of float
        The elapsed time of each individual call.
    function_wrapper : callable
        The wrapper; ``function_wrapper(x)`` calls ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the gradient of `fn_of` at `xit`.

    Both gradients and their difference are printed and saved into
    'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` equals 2, the two
    gradients are also plotted. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ic in range(xit.shape[0]):
        # Central difference (f(x + d e_i) - f(x - d e_i)) / (2 d).
        xd[ic] = xit[ic] + delta
        of_plus = fn_of(xd)

        xd[ic] = xit[ic] - delta
        of_minus = fn_of(xd)

        xd[ic] = xit[ic]

        dofg[ic] = 0.5 * (of_plus - of_minus) / delta

        output('**********', ic, aofg[ic], dofg[ic])

    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:, nm.newaxis], dofg[:, nm.newaxis],
                          diff[:, nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))

    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver.

    The step length is controlled either by simple backtracking, or by the
    SciPy full line-search, see the ``ls_method`` option. Convergence can
    be logged to text/plot files, see the ``log`` option.
    """
    name = 'opt.fmin_sd'

    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
         :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
         (e.g. the "warp violation" error caused by a negative volume
         element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
         plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
         :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
         \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
         <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
         the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
         Each of the dict items can be None."""),
    ]

    def __init__(self, conf, **kwargs):
        """
        Create the solver and set up the convergence log according to the
        ``log`` configuration option.
        """
        OptimizationSolver.__init__(self, conf, **kwargs)

        conf = self.conf

        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)

        if conf.is_any_log:
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations',
                                    'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])

        else:
            self.log = None

    def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
                 status=None, obj_args=None):
        """
        Minimize the objective function starting from `x0`.

        Parameters
        ----------
        x0 : array
            The initial guess.
        conf : Struct, optional
            The solver configuration; ``self.conf`` is used if not given.
        obj_fun : callable, optional
            The objective function; ``self.obj_fun`` is used if not given.
        obj_fun_grad : callable, optional
            The objective function gradient; ``self.obj_fun_grad`` is used
            if not given.
        status : dict, optional
            If given, it is filled with the convergence information.
        obj_args : tuple, optional
            The extra arguments of `obj_fun` and `obj_fun_grad`.

        Returns
        -------
        xit : array
            The last iterate.
        """
        conf = get_default(conf, self.conf)
        obj_fun = get_default(obj_fun, self.obj_fun)
        obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
        status = get_default(status, self.status)
        obj_args = get_default(obj_args, self.obj_args)

        if conf.output:
            # Redirect the module-level output() for the whole run.
            globals()['output'] = conf.output

        output('entering optimization loop...')

        # Wrap the callbacks to count calls and collect timings.
        nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
        nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)

        timer = Timer()
        time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}

        ofg = None

        it = 0
        xit = x0.copy()
        # Pre-set alpha so the final log call below works even when
        # convergence occurs in the very first iteration, before the
        # line-search assigns it.
        alpha = conf.ls0
        while 1:
            of = fn_of(xit)

            if it == 0:
                of0 = ofit0 = of_prev = of
                of_prev_prev = of + 5000.0

            if ofg is None:
                ofg = fn_ofg(xit)

            if conf.check:
                timer.start()
                check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
                time_stats['check'].append(timer.stop())

            ofg_norm = nla.norm(ofg, conf.norm)

            ret = conv_test(conf, it, of, ofit0, ofg_norm)
            if ret >= 0:
                break
            ofit0 = of

            ##
            # Backtrack (on errors).
            alpha = conf.ls0
            can_ls = True
            while 1:
                xit2 = xit - alpha * ofg
                aux = fn_of(xit2)

                if self.log is not None:
                    self.log(of, ofg_norm, alpha, it)

                if aux is None:
                    # Function evaluation failed ("warp") - reduce the step
                    # more aggressively and disable the full line-search.
                    alpha *= conf.ls_red_warp
                    can_ls = False
                    output('warp: reducing step (%f)' % alpha)

                elif conf.ls and conf.ls_method == 'backtracking':
                    if aux < of * conf.ls_on: break
                    alpha *= conf.ls_red
                    output('backtracking: reducing step (%f)' % alpha)

                else:
                    of_prev_prev = of_prev
                    of_prev = aux
                    break

                if alpha < conf.ls_min:
                    if aux is None:
                        raise RuntimeError('giving up...')
                    output('linesearch failed, continuing anyway')
                    break

            # These values are modified by the line search, even if it fails
            of_prev_bak = of_prev
            of_prev_prev_bak = of_prev_prev

            if conf.ls and can_ls and conf.ls_method == 'full':
                output('full linesearch...')
                alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                       linesearch.line_search(fn_of,fn_ofg,xit,
                                              -ofg,ofg,of_prev,of_prev_prev,
                                              c2=0.4)
                if alpha is None:  # line search failed -- use different one.
                    alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                           sopt.line_search(fn_of,fn_ofg,xit,
                                            -ofg,ofg,of_prev_bak,
                                            of_prev_prev_bak)
                    if alpha is None or alpha == 0:
                        # This line search also failed to find a better
                        # solution.
                        ret = 3
                        break
                output(' -> alpha: %.8e' % alpha)
            else:
                if conf.ls_method == 'full':
                    output('full linesearch off (%s and %s)'
                           % (conf.ls, can_ls))
                ofg1 = None

            if self.log is not None:
                self.log.plot_vlines(color='g', linewidth=0.5)

            xit = xit - alpha * ofg
            if ofg1 is None:
                # Force re-evaluation of the gradient at the new iterate.
                ofg = None
            else:
                ofg = ofg1.copy()

            for key, val in six.iteritems(time_stats):
                if len(val):
                    output('%10s: %7.2f [s]' % (key, val[-1]))

            it = it + 1

        output('status: %d' % ret)
        output('initial value: %.8e' % of0)
        output('current value: %.8e' % of)
        output('iterations: %d' % it)
        output('function evaluations: %d in %.2f [s]'
               % (nc_of[0], nm.sum(time_stats['of'])))
        output('gradient evaluations: %d in %.2f [s]'
               % (nc_ofg[0], nm.sum(time_stats['ofg'])))

        if self.log is not None:
            self.log(of, ofg_norm, alpha, it)

            if conf.log.plot is not None:
                self.log(save_figure=conf.log.plot, finished=True)

            else:
                self.log(finished=True)

        if status is not None:
            status['log'] = self.log
            # Store the conv_test() flag, not the status dict itself.
            status['status'] = ret
            status['of0'] = of0
            status['of'] = of
            status['it'] = it
            status['nc_of'] = nc_of[0]
            status['nc_ofg'] = nc_ofg[0]
            status['time_stats'] = time_stats

        return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = | get_default(obj_args, self.obj_args) | sfepy.base.base.get_default |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization iteration.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing ``eps_of``, ``eps_ofg``,
        ``eps_rd`` and ``i_max``.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if known.

    Returns
    -------
    status : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    if ofg_norm is None:
        # %e cannot format None - report without the gradient norm.
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its calls and the duration of
    each call are recorded.

    Returns
    -------
    call_count : list of one int
        The call counter, updated in place by the wrapper.
    durations : list of float
        The elapsed time of each individual call.
    function_wrapper : callable
        The wrapper; ``function_wrapper(x)`` calls ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the gradient of `fn_of` at `xit`.

    Both gradients and their difference are printed and saved into
    'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` equals 2, the two
    gradients are also plotted. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ic in range(xit.shape[0]):
        # Central difference (f(x + d e_i) - f(x - d e_i)) / (2 d).
        xd[ic] = xit[ic] + delta
        of_plus = fn_of(xd)

        xd[ic] = xit[ic] - delta
        of_minus = fn_of(xd)

        xd[ic] = xit[ic]

        dofg[ic] = 0.5 * (of_plus - of_minus) / delta

        output('**********', ic, aofg[ic], dofg[ic])

    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:, nm.newaxis], dofg[:, nm.newaxis],
                          diff[:, nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))

    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver.

    The step length is controlled either by simple backtracking, or by the
    SciPy full line-search, see the ``ls_method`` option. Convergence can
    be logged to text/plot files, see the ``log`` option.
    """
    name = 'opt.fmin_sd'

    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
         :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
         (e.g. the "warp violation" error caused by a negative volume
         element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
         plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
         :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
         \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
         <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
         the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
         Each of the dict items can be None."""),
    ]

    def __init__(self, conf, **kwargs):
        """
        Create the solver and set up the convergence log according to the
        ``log`` configuration option.
        """
        OptimizationSolver.__init__(self, conf, **kwargs)

        conf = self.conf

        log = get_logging_conf(conf)
        conf.log = log = Struct(name='log_conf', **log)
        conf.is_any_log = (log.text is not None) or (log.plot is not None)

        if conf.is_any_log:
            self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
                            [r'$\alpha$'], ['iteration']],
                           xlabels=['', '', 'all iterations',
                                    'all iterations'],
                           yscales=conf.yscales,
                           is_plot=conf.log.plot is not None,
                           log_filename=conf.log.text,
                           formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])

        else:
            self.log = None

    def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
                 status=None, obj_args=None):
        """
        Minimize the objective function starting from `x0`.

        Parameters
        ----------
        x0 : array
            The initial guess.
        conf : Struct, optional
            The solver configuration; ``self.conf`` is used if not given.
        obj_fun : callable, optional
            The objective function; ``self.obj_fun`` is used if not given.
        obj_fun_grad : callable, optional
            The objective function gradient; ``self.obj_fun_grad`` is used
            if not given.
        status : dict, optional
            If given, it is filled with the convergence information.
        obj_args : tuple, optional
            The extra arguments of `obj_fun` and `obj_fun_grad`.

        Returns
        -------
        xit : array
            The last iterate.
        """
        conf = get_default(conf, self.conf)
        obj_fun = get_default(obj_fun, self.obj_fun)
        obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
        status = get_default(status, self.status)
        obj_args = get_default(obj_args, self.obj_args)

        if conf.output:
            # Redirect the module-level output() for the whole run.
            globals()['output'] = conf.output

        output('entering optimization loop...')

        # Wrap the callbacks to count calls and collect timings.
        nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
        nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)

        timer = Timer()
        time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}

        ofg = None

        it = 0
        xit = x0.copy()
        # Pre-set alpha so the final log call below works even when
        # convergence occurs in the very first iteration, before the
        # line-search assigns it.
        alpha = conf.ls0
        while 1:
            of = fn_of(xit)

            if it == 0:
                of0 = ofit0 = of_prev = of
                of_prev_prev = of + 5000.0

            if ofg is None:
                ofg = fn_ofg(xit)

            if conf.check:
                timer.start()
                check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
                time_stats['check'].append(timer.stop())

            ofg_norm = nla.norm(ofg, conf.norm)

            ret = conv_test(conf, it, of, ofit0, ofg_norm)
            if ret >= 0:
                break
            ofit0 = of

            ##
            # Backtrack (on errors).
            alpha = conf.ls0
            can_ls = True
            while 1:
                xit2 = xit - alpha * ofg
                aux = fn_of(xit2)

                if self.log is not None:
                    self.log(of, ofg_norm, alpha, it)

                if aux is None:
                    # Function evaluation failed ("warp") - reduce the step
                    # more aggressively and disable the full line-search.
                    alpha *= conf.ls_red_warp
                    can_ls = False
                    output('warp: reducing step (%f)' % alpha)

                elif conf.ls and conf.ls_method == 'backtracking':
                    if aux < of * conf.ls_on: break
                    alpha *= conf.ls_red
                    output('backtracking: reducing step (%f)' % alpha)

                else:
                    of_prev_prev = of_prev
                    of_prev = aux
                    break

                if alpha < conf.ls_min:
                    if aux is None:
                        raise RuntimeError('giving up...')
                    output('linesearch failed, continuing anyway')
                    break

            # These values are modified by the line search, even if it fails
            of_prev_bak = of_prev
            of_prev_prev_bak = of_prev_prev

            if conf.ls and can_ls and conf.ls_method == 'full':
                output('full linesearch...')
                alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                       linesearch.line_search(fn_of,fn_ofg,xit,
                                              -ofg,ofg,of_prev,of_prev_prev,
                                              c2=0.4)
                if alpha is None:  # line search failed -- use different one.
                    alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
                           sopt.line_search(fn_of,fn_ofg,xit,
                                            -ofg,ofg,of_prev_bak,
                                            of_prev_prev_bak)
                    if alpha is None or alpha == 0:
                        # This line search also failed to find a better
                        # solution.
                        ret = 3
                        break
                output(' -> alpha: %.8e' % alpha)
            else:
                if conf.ls_method == 'full':
                    output('full linesearch off (%s and %s)'
                           % (conf.ls, can_ls))
                ofg1 = None

            if self.log is not None:
                self.log.plot_vlines(color='g', linewidth=0.5)

            xit = xit - alpha * ofg
            if ofg1 is None:
                # Force re-evaluation of the gradient at the new iterate.
                ofg = None
            else:
                ofg = ofg1.copy()

            for key, val in six.iteritems(time_stats):
                if len(val):
                    output('%10s: %7.2f [s]' % (key, val[-1]))

            it = it + 1

        output('status: %d' % ret)
        output('initial value: %.8e' % of0)
        output('current value: %.8e' % of)
        output('iterations: %d' % it)
        output('function evaluations: %d in %.2f [s]'
               % (nc_of[0], nm.sum(time_stats['of'])))
        output('gradient evaluations: %d in %.2f [s]'
               % (nc_ofg[0], nm.sum(time_stats['ofg'])))

        if self.log is not None:
            self.log(of, ofg_norm, alpha, it)

            if conf.log.plot is not None:
                self.log(save_figure=conf.log.plot, finished=True)

            else:
                self.log(finished=True)

        if status is not None:
            status['log'] = self.log
            # Store the conv_test() flag, not the status dict itself.
            status['status'] = ret
            status['of0'] = of0
            status['of'] = of
            status['it'] = it
            status['nc_of'] = nc_of[0]
            status['nc_ofg'] = nc_ofg[0]
            status['time_stats'] = time_stats

        return xit
class ScipyFMinSolver(OptimizationSolver):
"""
Interface to SciPy optimization solvers scipy.optimize.fmin_*.
"""
name = 'nls.scipy_fmin_like'
_i_max_name = {
'fmin' : 'maxiter',
'fmin_bfgs' : 'maxiter',
'fmin_cg' : 'maxiter',
'fmin_cobyla' : 'maxfun',
'fmin_l_bfgs_b' : 'maxfun',
'fmin_ncg' : 'maxiter',
'fmin_powell' : 'maxiter',
'fmin_slsqp' : 'iter',
'fmin_tnc' : 'maxfun',
}
_has_grad = ('fmin_bfgs', 'fmin_cg', 'fmin_l_bfgs_b', 'fmin_ncg',
'fmin_slsqp', 'fmin_tnc')
_parameters = [
('method',
'{%s}' % ', '.join(sorted(repr(ii) for ii in _i_max_name.keys())),
'fmin', False,
'The actual optimization method to use.'),
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('*', '*', None, False,
'Additional parameters supported by the method.'),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
self.set_method(self.conf)
def set_method(self, conf):
import scipy.optimize as so
try:
solver = getattr(so, conf.method)
except AttributeError:
raise ValueError('scipy solver %s does not exist!' % conf.method)
self.solver = solver
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
import inspect
if conf is not None:
self.set_method(conf)
else:
conf = self.conf
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
timer = | Timer(start=True) | sfepy.base.timing.Timer |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization iteration.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing ``eps_of``, ``eps_ofg``,
        ``eps_rd`` and ``i_max``.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if known.

    Returns
    -------
    status : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    if ofg_norm is None:
        # %e cannot format None - report without the gradient norm.
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its calls and the duration of
    each call are recorded.

    Returns
    -------
    call_count : list of one int
        The call counter, updated in place by the wrapper.
    durations : list of float
        The elapsed time of each individual call.
    function_wrapper : callable
        The wrapper; ``function_wrapper(x)`` calls ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the gradient of `fn_of` at `xit`.

    Both gradients and their difference are printed and saved into
    'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` equals 2, the two
    gradients are also plotted. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ic in range(xit.shape[0]):
        # Central difference (f(x + d e_i) - f(x - d e_i)) / (2 d).
        xd[ic] = xit[ic] + delta
        of_plus = fn_of(xd)

        xd[ic] = xit[ic] - delta
        of_minus = fn_of(xd)

        xd[ic] = xit[ic]

        dofg[ic] = 0.5 * (of_plus - of_minus) / delta

        output('**********', ic, aofg[ic], dofg[ic])

    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:, nm.newaxis], dofg[:, nm.newaxis],
                          diff[:, nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))

    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
| output('full linesearch...') | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization iteration.

    Parameters
    ----------
    conf : Struct
        The solver configuration providing ``eps_of``, ``eps_ofg``,
        ``eps_rd`` and ``i_max``.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The objective function value of the previous iteration.
    ofg_norm : float, optional
        The norm of the objective function gradient, if known.

    Returns
    -------
    status : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1
    if ofg_norm is None:
        # %e cannot format None - report without the gradient norm.
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its calls and the duration of
    each call are recorded.

    Returns
    -------
    call_count : list of one int
        The call counter, updated in place by the wrapper.
    durations : list of float
        The elapsed time of each individual call.
    function_wrapper : callable
        The wrapper; ``function_wrapper(x)`` calls ``function(x, *args)``.
    """
    call_count = [0]
    durations = []
    timer = Timer()

    def function_wrapper(x):
        call_count[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return call_count, durations, function_wrapper
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the gradient of `fn_of` at `xit`.

    Both gradients and their difference are printed and saved into
    'aofg.txt', 'dofg.txt' and 'diff.txt'. If `check` equals 2, the two
    gradients are also plotted. Pauses when done.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ic in range(xit.shape[0]):
        # Central difference (f(x + d e_i) - f(x - d e_i)) / (2 d).
        xd[ic] = xit[ic] + delta
        of_plus = fn_of(xd)

        xd[ic] = xit[ic] - delta
        of_minus = fn_of(xd)

        xd[ic] = xit[ic]

        dofg[ic] = 0.5 * (of_plus - of_minus) / delta

        output('**********', ic, aofg[ic], dofg[ic])

    diff = abs(aofg - dofg)
    aux = nm.concatenate((aofg[:, nm.newaxis], dofg[:, nm.newaxis],
                          diff[:, nm.newaxis]), 1)
    output(aux)
    output(nla.norm(diff, nm.Inf))

    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')

    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()

    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
"""
Steepest descent optimization solver.
"""
name = 'opt.fmin_sd'
_parameters = [
('i_max', 'int', 10, False,
'The maximum number of iterations.'),
('eps_rd', 'float', 1e-5, False,
'The relative delta of the objective function.'),
('eps_of', 'float', 1e-4, False,
'The tolerance for the objective function.'),
('eps_ofg', 'float', 1e-8, False,
'The tolerance for the objective function gradient.'),
('norm', 'numpy norm', nm.Inf, False,
'The norm to be used.'),
('ls', 'bool', True, False,
'If True, use a line-search.'),
('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
'The line-search method.'),
('ls_on', 'float', 0.99999, False,
"""Start the backtracking line-search by reducing the step, if
:math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
('ls0', '0.0 < float < 1.0', 1.0, False,
'The initial step.'),
('ls_red', '0.0 < float < 1.0', 0.5, False,
'The step reduction factor in case of correct residual assembling.'),
('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
"""The step reduction factor in case of failed residual assembling
(e.g. the "warp violation" error caused by a negative volume
element resulting from too large deformations)."""),
('ls_min', '0.0 < float < 1.0', 1e-5, False,
'The minimum step reduction factor.'),
('check', '0, 1 or 2', 0, False,
"""If >= 1, check the tangent matrix using finite differences. If 2,
plot the resulting sparsity patterns."""),
('delta', 'float', 1e-6, False,
r"""If `check >= 1`, the finite difference matrix is taken as
:math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
\delta}`."""),
('output', 'function', None, False,
"""If given, use it instead of :func:`output()
<sfepy.base.base.output()>` function."""),
('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
'The list of four convergence log subplot scales.'),
('log', 'dict or None', None, False,
"""If not None, log the convergence according to the configuration in
the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
Each of the dict items can be None."""),
]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
| output(' -> alpha: %.8e' % alpha) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration; uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1

    # Bug fix: '%e' % None raises TypeError, so the documented default
    # `ofg_norm=None` used to crash here.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0

    # NOTE: an exactly zero gradient norm is falsy and skips this branch -
    # original behavior, preserved.
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2

    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its evaluations and the
    duration of each evaluation are recorded.

    Returns
    -------
    n_calls : list of one int
        The mutable call counter.
    durations : list of float
        The duration of each call.
    timed_call : callable
        The wrapper calling ``function(x, *args)``.
    """
    n_calls = [0]
    durations = []
    timer = Timer()

    def timed_call(x):
        n_calls[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return n_calls, durations, timed_call
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the objective function `fn_of` around `xit`.

    The per-component comparison is printed, and the analytical gradient,
    the difference gradient and their absolute difference are saved to
    'aofg.txt', 'dofg.txt' and 'diff.txt' in the current directory. If
    `check` is 2, the two gradients are also plotted using pylab.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference w.r.t. the ii-th coordinate; xd is restored
        # before moving on to the next component.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    # Side-by-side columns: analytical, finite difference, difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # Report the worst-case component error.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver.

    Performs gradient descent steps with an optional backtracking or full
    (scipy-based) line-search, see the ``ls*`` options.
    """
    name = 'opt.fmin_sd'

    # Option description tuples: (name, type, default, required, doc).
    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
| output('warp: reducing step (%f)' % alpha) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration; uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1

    # Bug fix: '%e' % None raises TypeError, so the documented default
    # `ofg_norm=None` used to crash here.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0

    # NOTE: an exactly zero gradient norm is falsy and skips this branch -
    # original behavior, preserved.
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2

    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its evaluations and the
    duration of each evaluation are recorded.

    Returns
    -------
    n_calls : list of one int
        The mutable call counter.
    durations : list of float
        The duration of each call.
    timed_call : callable
        The wrapper calling ``function(x, *args)``.
    """
    n_calls = [0]
    durations = []
    timer = Timer()

    def timed_call(x):
        n_calls[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return n_calls, durations, timed_call
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the objective function `fn_of` around `xit`.

    The per-component comparison is printed, and the analytical gradient,
    the difference gradient and their absolute difference are saved to
    'aofg.txt', 'dofg.txt' and 'diff.txt' in the current directory. If
    `check` is 2, the two gradients are also plotted using pylab.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference w.r.t. the ii-th coordinate; xd is restored
        # before moving on to the next component.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    # Side-by-side columns: analytical, finite difference, difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # Report the worst-case component error.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver.

    Performs gradient descent steps with an optional backtracking or full
    (scipy-based) line-search, see the ``ls*`` options.
    """
    name = 'opt.fmin_sd'

    # Option description tuples: (name, type, default, required, doc).
    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
| output('linesearch failed, continuing anyway') | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration; uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1

    # Bug fix: '%e' % None raises TypeError, so the documented default
    # `ofg_norm=None` used to crash here.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0

    # NOTE: an exactly zero gradient norm is falsy and skips this branch -
    # original behavior, preserved.
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2

    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its evaluations and the
    duration of each evaluation are recorded.

    Returns
    -------
    n_calls : list of one int
        The mutable call counter.
    durations : list of float
        The duration of each call.
    timed_call : callable
        The wrapper calling ``function(x, *args)``.
    """
    n_calls = [0]
    durations = []
    timer = Timer()

    def timed_call(x):
        n_calls[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return n_calls, durations, timed_call
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the objective function `fn_of` around `xit`.

    The per-component comparison is printed, and the analytical gradient,
    the difference gradient and their absolute difference are saved to
    'aofg.txt', 'dofg.txt' and 'diff.txt' in the current directory. If
    `check` is 2, the two gradients are also plotted using pylab.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference w.r.t. the ii-th coordinate; xd is restored
        # before moving on to the next component.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    # Side-by-side columns: analytical, finite difference, difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # Report the worst-case component error.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver.

    Performs gradient descent steps with an optional backtracking or full
    (scipy-based) line-search, see the ``ls*`` options.
    """
    name = 'opt.fmin_sd'

    # Option description tuples: (name, type, default, required, doc).
    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
output('backtracking: reducing step (%f)' % alpha)
else:
of_prev_prev = of_prev
of_prev = aux
break
if alpha < conf.ls_min:
if aux is None:
raise RuntimeError('giving up...')
output('linesearch failed, continuing anyway')
break
# These values are modified by the line search, even if it fails
of_prev_bak = of_prev
of_prev_prev_bak = of_prev_prev
if conf.ls and can_ls and conf.ls_method == 'full':
output('full linesearch...')
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
linesearch.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev,of_prev_prev,
c2=0.4)
if alpha is None: # line search failed -- use different one.
alpha, fc, gc, of_prev, of_prev_prev, ofg1 = \
sopt.line_search(fn_of,fn_ofg,xit,
-ofg,ofg,of_prev_bak,
of_prev_prev_bak)
if alpha is None or alpha == 0:
# This line search also failed to find a better
# solution.
ret = 3
break
output(' -> alpha: %.8e' % alpha)
else:
if conf.ls_method == 'full':
output('full linesearch off (%s and %s)'
% (conf.ls, can_ls))
ofg1 = None
if self.log is not None:
self.log.plot_vlines(color='g', linewidth=0.5)
xit = xit - alpha * ofg
if ofg1 is None:
ofg = None
else:
ofg = ofg1.copy()
for key, val in six.iteritems(time_stats):
if len(val):
| output('%10s: %7.2f [s]' % (key, val[-1])) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import output, get_default, pause, Struct
from sfepy.base.log import Log, get_logging_conf
from sfepy.base.timing import Timer
from sfepy.solvers.solvers import OptimizationSolver
import scipy.optimize as sopt
import scipy.optimize.linesearch as linesearch
import six
from six.moves import range
def conv_test(conf, it, of, of0, ofg_norm=None):
    """
    Check the convergence of the optimization loop.

    Parameters
    ----------
    conf : Struct
        The solver configuration; uses `eps_of`, `eps_ofg`, `eps_rd` and
        `i_max`.
    it : int
        The current iteration number.
    of : float
        The current objective function value.
    of0 : float
        The previous objective function value.
    ofg_norm : float, optional
        The norm of the objective function gradient, if available.

    Returns
    -------
    flag : int
        * -1 ... continue
        * 0 ... small OF -> stop
        * 1 ... i_max reached -> stop
        * 2 ... small OFG -> stop
        * 3 ... small relative decrease of OF
    """
    status = -1

    # Bug fix: '%e' % None raises TypeError, so the documented default
    # `ofg_norm=None` used to crash here.
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))

    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))

    if (abs(of) < conf.eps_of):
        status = 0

    # NOTE: an exactly zero gradient norm is falsy and skips this branch -
    # original behavior, preserved.
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2

    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs(of0))):
        status = 3

    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
def wrap_function(function, args):
    """
    Wrap `function` so that the number of its evaluations and the
    duration of each evaluation are recorded.

    Returns
    -------
    n_calls : list of one int
        The mutable call counter.
    durations : list of float
        The duration of each call.
    timed_call : callable
        The wrapper calling ``function(x, *args)``.
    """
    n_calls = [0]
    durations = []
    timer = Timer()

    def timed_call(x):
        n_calls[0] += 1
        timer.start()
        result = function(x, *args)
        durations.append(timer.stop())
        return result

    return n_calls, durations, timed_call
def check_gradient(xit, aofg, fn_of, delta, check):
    """
    Compare the analytical gradient `aofg` with a central finite difference
    approximation of the objective function `fn_of` around `xit`.

    The per-component comparison is printed, and the analytical gradient,
    the difference gradient and their absolute difference are saved to
    'aofg.txt', 'dofg.txt' and 'diff.txt' in the current directory. If
    `check` is 2, the two gradients are also plotted using pylab.
    """
    dofg = nm.zeros_like(aofg)
    xd = xit.copy()
    for ii in range(xit.shape[0]):
        # Central difference w.r.t. the ii-th coordinate; xd is restored
        # before moving on to the next component.
        xd[ii] = xit[ii] + delta
        ofp = fn_of(xd)
        xd[ii] = xit[ii] - delta
        ofm = fn_of(xd)
        xd[ii] = xit[ii]
        dofg[ii] = 0.5 * (ofp - ofm) / delta
        output('**********', ii, aofg[ii], dofg[ii])
    diff = abs(aofg - dofg)
    # Side-by-side columns: analytical, finite difference, difference.
    aux = nm.concatenate((aofg[:,nm.newaxis], dofg[:,nm.newaxis],
                          diff[:,nm.newaxis]), 1)
    output(aux)
    # Report the worst-case component error.
    output(nla.norm(diff, nm.Inf))
    aofg.tofile('aofg.txt', ' ')
    dofg.tofile('dofg.txt', ' ')
    diff.tofile('diff.txt', ' ')
    if check == 2:
        import pylab
        pylab.plot(aofg)
        pylab.plot(dofg)
        pylab.legend(('analytical', 'finite difference'))
        pylab.show()
    pause('gradient checking done')
class FMinSteepestDescent(OptimizationSolver):
    """
    Steepest descent optimization solver.

    Performs gradient descent steps with an optional backtracking or full
    (scipy-based) line-search, see the ``ls*`` options.
    """
    name = 'opt.fmin_sd'

    # Option description tuples: (name, type, default, required, doc).
    _parameters = [
        ('i_max', 'int', 10, False,
         'The maximum number of iterations.'),
        ('eps_rd', 'float', 1e-5, False,
         'The relative delta of the objective function.'),
        ('eps_of', 'float', 1e-4, False,
         'The tolerance for the objective function.'),
        ('eps_ofg', 'float', 1e-8, False,
         'The tolerance for the objective function gradient.'),
        ('norm', 'numpy norm', nm.Inf, False,
         'The norm to be used.'),
        ('ls', 'bool', True, False,
         'If True, use a line-search.'),
        ('ls_method', "{'backtracking', 'full'}", 'backtracking', False,
         'The line-search method.'),
        ('ls_on', 'float', 0.99999, False,
         """Start the backtracking line-search by reducing the step, if
            :math:`||f(x^i)|| / ||f(x^{i-1})||` is larger than `ls_on`."""),
        ('ls0', '0.0 < float < 1.0', 1.0, False,
         'The initial step.'),
        ('ls_red', '0.0 < float < 1.0', 0.5, False,
         'The step reduction factor in case of correct residual assembling.'),
        ('ls_red_warp', '0.0 < float < 1.0', 0.1, False,
         """The step reduction factor in case of failed residual assembling
            (e.g. the "warp violation" error caused by a negative volume
            element resulting from too large deformations)."""),
        ('ls_min', '0.0 < float < 1.0', 1e-5, False,
         'The minimum step reduction factor.'),
        ('check', '0, 1 or 2', 0, False,
         """If >= 1, check the tangent matrix using finite differences. If 2,
            plot the resulting sparsity patterns."""),
        ('delta', 'float', 1e-6, False,
         r"""If `check >= 1`, the finite difference matrix is taken as
             :math:`A_{ij} = \frac{f_i(x_j + \delta) - f_i(x_j - \delta)}{2
             \delta}`."""),
        ('output', 'function', None, False,
         """If given, use it instead of :func:`output()
            <sfepy.base.base.output()>` function."""),
        ('yscales', 'list of str', ['linear', 'log', 'log', 'linear'], False,
         'The list of four convergence log subplot scales.'),
        ('log', 'dict or None', None, False,
         """If not None, log the convergence according to the configuration in
            the following form: ``{'text' : 'log.txt', 'plot' : 'log.pdf'}``.
            Each of the dict items can be None."""),
    ]
def __init__(self, conf, **kwargs):
OptimizationSolver.__init__(self, conf, **kwargs)
conf = self.conf
log = get_logging_conf(conf)
conf.log = log = Struct(name='log_conf', **log)
conf.is_any_log = (log.text is not None) or (log.plot is not None)
if conf.is_any_log:
self.log = Log([[r'$||\Psi||$'], [r'$||\nabla \Psi||$'],
[r'$\alpha$'], ['iteration']],
xlabels=['', '', 'all iterations', 'all iterations'],
yscales=conf.yscales,
is_plot=conf.log.plot is not None,
log_filename=conf.log.text,
formats=[['%.8e'], ['%.3e'], ['%.3e'], ['%d']])
else:
self.log = None
def __call__(self, x0, conf=None, obj_fun=None, obj_fun_grad=None,
status=None, obj_args=None):
conf = get_default(conf, self.conf)
obj_fun = get_default(obj_fun, self.obj_fun)
obj_fun_grad = get_default(obj_fun_grad, self.obj_fun_grad)
status = get_default(status, self.status)
obj_args = get_default(obj_args, self.obj_args)
if conf.output:
globals()['output'] = conf.output
output('entering optimization loop...')
nc_of, tt_of, fn_of = wrap_function(obj_fun, obj_args)
nc_ofg, tt_ofg, fn_ofg = wrap_function(obj_fun_grad, obj_args)
timer = Timer()
time_stats = {'of' : tt_of, 'ofg': tt_ofg, 'check' : []}
ofg = None
it = 0
xit = x0.copy()
while 1:
of = fn_of(xit)
if it == 0:
of0 = ofit0 = of_prev = of
of_prev_prev = of + 5000.0
if ofg is None:
ofg = fn_ofg(xit)
if conf.check:
timer.start()
check_gradient(xit, ofg, fn_of, conf.delta, conf.check)
time_stats['check'].append(timer.stop())
ofg_norm = nla.norm(ofg, conf.norm)
ret = conv_test(conf, it, of, ofit0, ofg_norm)
if ret >= 0:
break
ofit0 = of
##
# Backtrack (on errors).
alpha = conf.ls0
can_ls = True
while 1:
xit2 = xit - alpha * ofg
aux = fn_of(xit2)
if self.log is not None:
self.log(of, ofg_norm, alpha, it)
if aux is None:
alpha *= conf.ls_red_warp
can_ls = False
output('warp: reducing step (%f)' % alpha)
elif conf.ls and conf.ls_method == 'backtracking':
if aux < of * conf.ls_on: break
alpha *= conf.ls_red
| output('backtracking: reducing step (%f)' % alpha) | sfepy.base.base.output |
"""
Module for handling state variables.
"""
import numpy as nm
from sfepy.base.base import Struct
class State(Struct):
    """
    Class holding/manipulating the state variables and corresponding DOF
    vectors.

    Manipulating the state class changes the underlying variables, and
    hence also the corresponding equations/terms (if any).

    Attributes
    ----------
    variables : Variables instance
        The variables the state corresponds to.
    vec : array
        The full DOF vector.
    r_vec : array or None
        The reduced DOF vector (presumably maintained for the LCBC support
        described in the Notes - confirm in the full class definition).

    Notes
    -----
    This class allows working with LCBC conditions in time-dependent
    problems, as it keeps track of the reduced DOF vector that cannot
    be reconstructed from the full DOF vector by using the usual
    `variables.strip_state_vector()`.
    """
@staticmethod
def from_variables(variables):
"""
Create a State instance for the given variables.
The DOF vector is created using the DOF data in `variables`.
Parameters
----------
variables : Variables instance
The variables.
"""
parts = variables.get_state_parts()
vec = variables.create_state_vector()
for key, part in parts.iteritems():
indx = variables.get_indx(key)
vec[indx] = part
return State(variables, vec)
def __init__(self, variables, vec=None, preserve_caches=False):
"""
Create a State instance for the given variables.
Parameters
----------
variables : Variables instance
The variables.
vec : array, optional
The (initial) DOF vector corresponding to the variables.
preserve_caches : bool
If True, do not invalidate evaluate caches of variables.
"""
| Struct.__init__(self, variables=variables, vec=vec, r_vec=None) | sfepy.base.base.Struct.__init__ |
from sfepy.terms.extmods import terms
from sfepy.terms.cache import DataCache
from sfepy.base.base import nm, pause, debug
class ExpHistoryDataCache(DataCache):
    r"""History for exponential decay convolution kernels.

    The decay argument is :math:`F(\Delta t)`; :math:`F(t_0=0)` is assumed
    to be 1.0.
    """
    name = 'exp_history'
    # The expected term argument names.
    arg_types = ('decay', 'values')
    def __init__(self, name, arg_names, history_sizes=None):
        # Cached data keys: the accumulated history, the current increment
        # and the decay factor.
        DataCache.__init__(self, name, arg_names,
                           ['history', 'increment', 'decay'], history_sizes)
    def init_data(self, key, ckey, term, **kwargs):
        """
        Allocate zeroed cache arrays with shapes taken from the term
        arguments: 'history' and 'increment' match the values array,
        'decay' matches the decay array.
        """
        decay, values = self.get_args(**kwargs)

        shape = values.shape

        self.shapes = {
            'history' : shape,
            'increment' : shape,
            'decay' : decay.shape,
        }

        DataCache.init_datas(self, ckey, self.shapes, zero=True)
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    origin = coors[:, 0, :]

    # Edge vectors w.r.t. the first face node define the local system.
    tangent1 = coors[:, 1, :] - origin
    aux = coors[:, -1, :] - origin

    # Unit normal, and the second tangent made orthogonal to the first.
    normal = nm.cross(tangent1, aux)
    tangent2 = nm.cross(normal, tangent1)

    tangent1 = tangent1 / norm(tangent1)[:, None]
    tangent2 = tangent2 / norm(tangent2)[:, None]
    normal = normal / norm(normal)[:, None]

    # Stack the orthonormal vectors as columns -> the transposed matrix T.
    mtx_t = nm.concatenate((tangent1[:, :, None],
                            tangent2[:, :, None],
                            normal[:, :, None]), axis=2)

    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to the global coordinate
    system, one node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    n_ep = out.shape[2] // mtx_t.shape[2]

    for iep in range(n_ep):
        # Strided view of the iep-th node DOFs - updated in place.
        dofs = out[:, 0, iep::n_ep, 0]
        dofs[:] = dot_sequences(mtx_t, dofs, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to the global coordinate
    system, one node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    n_ep = out.shape[-1] // mtx_t.shape[-1]

    for iepr in range(n_ep):
        rows = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            cols = slice(iepc, None, n_ep)
            # Apply T A T^T to each row/column node block, in place.
            blk = out[:, 0, rows, cols]
            blk[:] = dot_sequences(dot_sequences(mtx_t, blk, 'AB'),
                                   mtx_t, 'ABT')
def create_mapping(coors, gel, order):
"""
Create mapping from transformed (in `x-y` plane) element faces to
reference element faces.
Parameters
----------
coors : array
The transformed coordinates of element nodes, shape `(n_el,
n_ep, dim)`. The function verifies that the all `z` components
are zero.
gel : GeometryElement instance
The geometry element corresponding to the faces.
order : int
The polynomial order of the mapping.
Returns
-------
mapping : VolumeMapping instance
The reference element face mapping.
"""
# Strip 'z' component (should be 0 now...).
assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
coors = coors[:, :, :-1].copy()
# Mapping from transformed element to reference element.
sh = coors.shape
seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
seq_conn.shape = sh[:2]
mapping = | VolumeMapping(seq_coors, seq_conn, gel=gel, order=1) | sfepy.discrete.fem.mappings.VolumeMapping |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored at the first node of each face.
    tang1 = coors[:, 1, :] - coors[:, 0, :]
    tang2 = coors[:, -1, :] - coors[:, 0, :]
    # Face normal, then re-orthogonalize the second tangent against the
    # first one so that the triad is mutually orthogonal.
    normal = nm.cross(tang1, tang2)
    tang2 = nm.cross(normal, tang1)
    # Normalize all three vectors (L2 norm along the coordinate axis).
    tang1 = tang1 / nm.linalg.norm(tang1, axis=1)[:, None]
    tang2 = tang2 / nm.linalg.norm(tang2, axis=1)[:, None]
    normal = normal / nm.linalg.norm(normal, axis=1)[:, None]
    # Stack the unit vectors as columns: the result is stored so that
    # in-plane coordinates are obtained as T^T X.
    mtx_t = nm.concatenate((tang1[:, :, None],
                            tang2[:, :, None],
                            normal[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the vector size to the space dimension.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        # Strided view selecting this node's components in all cells.
        vec = out[:, 0, node::n_ep, 0]
        # Rotate to the global frame in-place: T v.
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the matrix size to the space dimension.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for iepr in range(n_ep):
        ir = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            ic = slice(iepc, None, n_ep)
            # Strided view of the (row node, column node) sub-matrix.
            fn = out[:, 0, ir, ic]
            # Similarity transformation T A T^T, written back in-place.
            fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all `z` components are
        zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip the 'z' component - after the in-plane transformation it must
    # vanish up to round-off.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()
    # Each face gets its own private nodes, so the connectivity is simply
    # a reshaped range - faces do not share any points.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]
    # Honour the requested polynomial order (it was hard-coded to 1
    # before, silently ignoring the `order` argument).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)
    return mapping
def describe_geometry(field, region, integral):
"""
Describe membrane geometry in a given region.
Parameters
----------
field : Field instance
The field defining the FE approximation.
region : Region instance
The surface region to describe.
integral : Integral instance
The integral defining the quadrature points.
Returns
-------
mtx_t : array
The transposed transformation matrix :math:`T`, see
:func:`create_transformation_matrix`.
membrane_geo : CMapping instance
The mapping from transformed elements to a reference elements.
"""
# Coordinates of element vertices.
sg, _ = field.get_mapping(region, integral, 'surface')
sd = field.surface_data[region.name]
coors = field.coors[sd.econn[:, :sg.n_ep]]
# Coordinate transformation matrix (transposed!).
mtx_t = create_transformation_matrix(coors)
# Transform coordinates to the local coordinate system.
coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
# Mapping from transformed elements to reference elements.
gel = field.gel.surface_facet
vm = create_mapping(coors_loc, gel, 1)
qp = integral.get_qp(gel.name)
ps = | PolySpace.any_from_args(None, gel, field.approx_order) | sfepy.discrete.fem.poly_spaces.PolySpace.any_from_args |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored at the first node of each face.
    tang1 = coors[:, 1, :] - coors[:, 0, :]
    tang2 = coors[:, -1, :] - coors[:, 0, :]
    # Face normal, then re-orthogonalize the second tangent against the
    # first one so that the triad is mutually orthogonal.
    normal = nm.cross(tang1, tang2)
    tang2 = nm.cross(normal, tang1)
    # Normalize all three vectors (L2 norm along the coordinate axis).
    tang1 = tang1 / nm.linalg.norm(tang1, axis=1)[:, None]
    tang2 = tang2 / nm.linalg.norm(tang2, axis=1)[:, None]
    normal = normal / nm.linalg.norm(normal, axis=1)[:, None]
    # Stack the unit vectors as columns: the result is stored so that
    # in-plane coordinates are obtained as T^T X.
    mtx_t = nm.concatenate((tang1[:, :, None],
                            tang2[:, :, None],
                            normal[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the vector size to the space dimension.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        # Strided view selecting this node's components in all cells.
        vec = out[:, 0, node::n_ep, 0]
        # Rotate to the global frame in-place: T v.
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the matrix size to the space dimension.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for iepr in range(n_ep):
        ir = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            ic = slice(iepc, None, n_ep)
            # Strided view of the (row node, column node) sub-matrix.
            fn = out[:, 0, ir, ic]
            # Similarity transformation T A T^T, written back in-place.
            fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all `z` components are
        zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip the 'z' component - after the in-plane transformation it must
    # vanish up to round-off.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()
    # Each face gets its own private nodes, so the connectivity is simply
    # a reshaped range - faces do not share any points.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]
    # Honour the requested polynomial order (it was hard-coded to 1
    # before, silently ignoring the `order` argument).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)
    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices: the first sg.n_ep entries of the
    # surface connectivity are taken per face.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system (in-plane,
    # relative to the first node of each face).
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the face
    # geometry is given by the vertices only, hence order 1.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Replace the base functions with those of the approximation order
    # evaluated in the quadrature points.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
"""
Describe deformation of a thin incompressible 2D membrane in 3D
space, composed of flat finite element faces.
The coordinate system of each element (face), i.e. the membrane
mid-surface, should coincide with the `x`, `y` axes of the `x-y`
plane.
Parameters
----------
el_disps : array
The displacements of element nodes, shape `(n_el, n_ep, dim)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx_c ; array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition.
mtx_b : array
The discrete Green strain variation operator.
"""
sh = bfg.shape
n_ep = sh[3]
dim = el_disps.shape[2]
sym2 = | dim2sym(dim-1) | sfepy.mechanics.tensors.dim2sym |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored at the first node of each face.
    tang1 = coors[:, 1, :] - coors[:, 0, :]
    tang2 = coors[:, -1, :] - coors[:, 0, :]
    # Face normal, then re-orthogonalize the second tangent against the
    # first one so that the triad is mutually orthogonal.
    normal = nm.cross(tang1, tang2)
    tang2 = nm.cross(normal, tang1)
    # Normalize all three vectors (L2 norm along the coordinate axis).
    tang1 = tang1 / nm.linalg.norm(tang1, axis=1)[:, None]
    tang2 = tang2 / nm.linalg.norm(tang2, axis=1)[:, None]
    normal = normal / nm.linalg.norm(normal, axis=1)[:, None]
    # Stack the unit vectors as columns: the result is stored so that
    # in-plane coordinates are obtained as T^T X.
    mtx_t = nm.concatenate((tang1[:, :, None],
                            tang2[:, :, None],
                            normal[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the vector size to the space dimension.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        # Strided view selecting this node's components in all cells.
        vec = out[:, 0, node::n_ep, 0]
        # Rotate to the global frame in-place: T v.
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the matrix size to the space dimension.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for iepr in range(n_ep):
        ir = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            ic = slice(iepc, None, n_ep)
            # Strided view of the (row node, column node) sub-matrix.
            fn = out[:, 0, ir, ic]
            # Similarity transformation T A T^T, written back in-place.
            fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all `z` components are
        zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip the 'z' component - after the in-plane transformation it must
    # vanish up to round-off.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()
    # Each face gets its own private nodes, so the connectivity is simply
    # a reshaped range - faces do not share any points.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]
    # Honour the requested polynomial order (it was hard-coded to 1
    # before, silently ignoring the `order` argument).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)
    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices: the first sg.n_ep entries of the
    # surface connectivity are taken per face.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system (in-plane,
    # relative to the first node of each face).
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the face
    # geometry is given by the vertices only, hence order 1.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Replace the base functions with those of the approximation order
    # evaluated in the quadrature points.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
"""
Describe deformation of a thin incompressible 2D membrane in 3D
space, composed of flat finite element faces.
The coordinate system of each element (face), i.e. the membrane
mid-surface, should coincide with the `x`, `y` axes of the `x-y`
plane.
Parameters
----------
el_disps : array
The displacements of element nodes, shape `(n_el, n_ep, dim)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx_c ; array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition.
mtx_b : array
The discrete Green strain variation operator.
"""
sh = bfg.shape
n_ep = sh[3]
dim = el_disps.shape[2]
sym2 = dim2sym(dim-1)
# Repeat el_disps by number of quadrature points.
el_disps_qp = | insert_strided_axis(el_disps, 1, bfg.shape[1]) | sfepy.linalg.insert_strided_axis |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored at the first node of each face.
    tang1 = coors[:, 1, :] - coors[:, 0, :]
    tang2 = coors[:, -1, :] - coors[:, 0, :]
    # Face normal, then re-orthogonalize the second tangent against the
    # first one so that the triad is mutually orthogonal.
    normal = nm.cross(tang1, tang2)
    tang2 = nm.cross(normal, tang1)
    # Normalize all three vectors (L2 norm along the coordinate axis).
    tang1 = tang1 / nm.linalg.norm(tang1, axis=1)[:, None]
    tang2 = tang2 / nm.linalg.norm(tang2, axis=1)[:, None]
    normal = normal / nm.linalg.norm(normal, axis=1)[:, None]
    # Stack the unit vectors as columns: the result is stored so that
    # in-plane coordinates are obtained as T^T X.
    mtx_t = nm.concatenate((tang1[:, :, None],
                            tang2[:, :, None],
                            normal[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the vector size to the space dimension.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        # Strided view selecting this node's components in all cells.
        vec = out[:, 0, node::n_ep, 0]
        # Rotate to the global frame in-place: T v.
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the matrix size to the space dimension.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for iepr in range(n_ep):
        ir = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            ic = slice(iepc, None, n_ep)
            # Strided view of the (row node, column node) sub-matrix.
            fn = out[:, 0, ir, ic]
            # Similarity transformation T A T^T, written back in-place.
            fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all `z` components are
        zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip the 'z' component - after the in-plane transformation it must
    # vanish up to round-off.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()
    # Each face gets its own private nodes, so the connectivity is simply
    # a reshaped range - faces do not share any points.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]
    # Honour the requested polynomial order (it was hard-coded to 1
    # before, silently ignoring the `order` argument).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)
    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices: the first sg.n_ep entries of the
    # surface connectivity are taken per face.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system (in-plane,
    # relative to the first node of each face).
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the face
    # geometry is given by the vertices only, hence order 1.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Replace the base functions with those of the approximation order
    # evaluated in the quadrature points.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
"""
Describe deformation of a thin incompressible 2D membrane in 3D
space, composed of flat finite element faces.
The coordinate system of each element (face), i.e. the membrane
mid-surface, should coincide with the `x`, `y` axes of the `x-y`
plane.
Parameters
----------
el_disps : array
The displacements of element nodes, shape `(n_el, n_ep, dim)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx_c ; array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition.
mtx_b : array
The discrete Green strain variation operator.
"""
sh = bfg.shape
n_ep = sh[3]
dim = el_disps.shape[2]
sym2 = dim2sym(dim-1)
# Repeat el_disps by number of quadrature points.
el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
# Transformed (in-plane) displacement gradient with
# shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
du = | dot_sequences(bfg, el_disps_qp) | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored at the first node of each face.
    tang1 = coors[:, 1, :] - coors[:, 0, :]
    tang2 = coors[:, -1, :] - coors[:, 0, :]
    # Face normal, then re-orthogonalize the second tangent against the
    # first one so that the triad is mutually orthogonal.
    normal = nm.cross(tang1, tang2)
    tang2 = nm.cross(normal, tang1)
    # Normalize all three vectors (L2 norm along the coordinate axis).
    tang1 = tang1 / nm.linalg.norm(tang1, axis=1)[:, None]
    tang2 = tang2 / nm.linalg.norm(tang2, axis=1)[:, None]
    normal = normal / nm.linalg.norm(normal, axis=1)[:, None]
    # Stack the unit vectors as columns: the result is stored so that
    # in-plane coordinates are obtained as T^T X.
    mtx_t = nm.concatenate((tang1[:, :, None],
                            tang2[:, :, None],
                            normal[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the vector size to the space dimension.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        # Strided view selecting this node's components in all cells.
        vec = out[:, 0, node::n_ep, 0]
        # Rotate to the global frame in-place: T v.
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the matrix size to the space dimension.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for iepr in range(n_ep):
        ir = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            ic = slice(iepc, None, n_ep)
            # Strided view of the (row node, column node) sub-matrix.
            fn = out[:, 0, ir, ic]
            # Similarity transformation T A T^T, written back in-place.
            fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all `z` components are
        zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip the 'z' component - after the in-plane transformation it must
    # vanish up to round-off.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()
    # Each face gets its own private nodes, so the connectivity is simply
    # a reshaped range - faces do not share any points.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]
    # Honour the requested polynomial order (it was hard-coded to 1
    # before, silently ignoring the `order` argument).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)
    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices: the first sg.n_ep entries of the
    # surface connectivity are taken per face.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system (in-plane,
    # relative to the first node of each face).
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the face
    # geometry is given by the vertices only, hence order 1.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Replace the base functions with those of the approximation order
    # evaluated in the quadrature points.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
"""
Describe deformation of a thin incompressible 2D membrane in 3D
space, composed of flat finite element faces.
The coordinate system of each element (face), i.e. the membrane
mid-surface, should coincide with the `x`, `y` axes of the `x-y`
plane.
Parameters
----------
el_disps : array
The displacements of element nodes, shape `(n_el, n_ep, dim)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx_c ; array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition.
mtx_b : array
The discrete Green strain variation operator.
"""
sh = bfg.shape
n_ep = sh[3]
dim = el_disps.shape[2]
sym2 = dim2sym(dim-1)
# Repeat el_disps by number of quadrature points.
el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
# Transformed (in-plane) displacement gradient with
# shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
du = dot_sequences(bfg, el_disps_qp)
# Deformation gradient F w.r.t. in plane coordinates.
# F_{ia} = dx_i / dX_a,
# a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
# Right Cauchy-Green deformation tensor C.
# C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
mtx_c = | dot_sequences(mtx_f, mtx_f, 'ABT') | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored at the first node of each face.
    tang1 = coors[:, 1, :] - coors[:, 0, :]
    tang2 = coors[:, -1, :] - coors[:, 0, :]
    # Face normal, then re-orthogonalize the second tangent against the
    # first one so that the triad is mutually orthogonal.
    normal = nm.cross(tang1, tang2)
    tang2 = nm.cross(normal, tang1)
    # Normalize all three vectors (L2 norm along the coordinate axis).
    tang1 = tang1 / nm.linalg.norm(tang1, axis=1)[:, None]
    tang2 = tang2 / nm.linalg.norm(tang2, axis=1)[:, None]
    normal = normal / nm.linalg.norm(normal, axis=1)[:, None]
    # Stack the unit vectors as columns: the result is stored so that
    # in-plane coordinates are obtained as T^T X.
    mtx_t = nm.concatenate((tang1[:, :, None],
                            tang2[:, :, None],
                            normal[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the vector size to the space dimension.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        # Strided view selecting this node's components in all cells.
        vec = out[:, 0, node::n_ep, 0]
        # Rotate to the global frame in-place: T v.
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are stored node-interleaved, so the number of element nodes
    # follows from the ratio of the matrix size to the space dimension.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for iepr in range(n_ep):
        ir = slice(iepr, None, n_ep)
        for iepc in range(n_ep):
            ic = slice(iepc, None, n_ep)
            # Strided view of the (row node, column node) sub-matrix.
            fn = out[:, 0, ir, ic]
            # Similarity transformation T A T^T, written back in-place.
            fn[:] = dot_sequences(dot_sequences(mtx_t, fn, 'AB'), mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all `z` components are
        zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip the 'z' component - after the in-plane transformation it must
    # vanish up to round-off.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()
    # Each face gets its own private nodes, so the connectivity is simply
    # a reshaped range - faces do not share any points.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]
    # Honour the requested polynomial order (it was hard-coded to 1
    # before, silently ignoring the `order` argument).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)
    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    # Restrict the surface connectivity to the first n_ep (vertex) nodes.
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system, with the first
    # face node of each element as the origin.
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the geometry
    # mapping itself is linear (order 1), independently of the field order.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Overwrite the base functions with those of the field approximation
    # order, while keeping the (linear) geometry mapping data.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
    """
    Describe deformation of a thin incompressible 2D membrane in 3D
    space, composed of flat finite element faces.

    The coordinate system of each element (face), i.e. the membrane
    mid-surface, should coincide with the `x`, `y` axes of the `x-y`
    plane.

    Parameters
    ----------
    el_disps : array
        The displacements of element nodes, shape `(n_el, n_ep, dim)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.

    Returns
    -------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition.
    mtx_b : array
        The discrete Green strain variation operator.
    """
    sh = bfg.shape
    n_ep = sh[3]
    dim = el_disps.shape[2]
    # Number of independent in-plane strain components (3 for dim == 3).
    sym2 = dim2sym(dim-1)
    # Repeat el_disps by number of quadrature points.
    el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
    # Transformed (in-plane) displacement gradient with
    # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
    du = dot_sequences(bfg, el_disps_qp)
    # Deformation gradient F w.r.t. in plane coordinates.
    # F_{ia} = dx_i / dX_a,
    # a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
    mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
    # Right Cauchy-Green deformation tensor C.
    # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
    mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
    # C_33 from incompressibility (det(C) = 1 implies
    # C_33 = 1 / (C_11 C_22 - C_12^2)).
    c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
                 - mtx_c[..., 0, 1]**2)
    # Discrete Green strain variation operator; rows correspond to the
    # variations of E_11, E_22, 2 E_12, columns to the component-wise
    # grouped nodal DOFs.
    mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
    # Row 0: delta E_11 terms, one slice per displacement component i.
    mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
    mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
    # Row 1: delta E_22 terms.
    mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
    mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
    # Row 2: delta (2 E_12) terms - symmetric combination of both gradients.
    mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
                                   + bfg[..., 0, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 2:3]
    return mtx_c, c33, mtx_b
def get_tangent_stress_matrix(stress, bfg):
"""
Get the tangent stress matrix of a thin incompressible 2D membrane
in 3D space, given a stress.
Parameters
----------
stress : array
The components `11, 22, 12` of the second Piola-Kirchhoff stress
tensor, shape `(n_el, n_qp, 3, 1)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx : array
The tangent stress matrix, shape `(n_el, n_qp, dim*n_ep, dim*n_ep)`.
"""
n_el, n_qp, dim, n_ep = bfg.shape
dim += 1
mtx = nm.zeros((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)
g1tg1 = | dot_sequences(bfg[..., 0:1, :], bfg[..., 0:1, :], 'ATB') | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored in the first face node.
    e1 = coors[:, 1, :] - coors[:, 0, :]
    e2 = coors[:, -1, :] - coors[:, 0, :]

    # Face normal, then an in-plane vector orthogonal to e1.
    nn = nm.cross(e1, e2)
    e2 = nm.cross(nn, e1)

    # Normalize all three directions.
    e1 = e1 / norm(e1)[:, None]
    e2 = e2 / norm(e2)[:, None]
    nn = nn / norm(nn)[:, None]

    # Stack as columns -> the transposed transformation matrix.
    mtx_t = nm.concatenate((e1[:, :, None],
                            e2[:, :, None],
                            nn[:, :, None]), axis=2)

    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, so the vector of node `node`
    # occupies the strided slice node::n_ep.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        sel = slice(node, None, n_ep)
        # Strided view into `out` - assigning to it updates `out` in-place.
        vec = out[:, 0, sel, 0]
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, see :func:`transform_asm_vectors`.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for node_r in range(n_ep):
        rows = slice(node_r, None, n_ep)
        for node_c in range(n_ep):
            cols = slice(node_c, None, n_ep)
            # Strided view into `out`; apply T . B . T^T in two steps.
            blk = out[:, 0, rows, cols]
            aux = dot_sequences(mtx_t, blk, 'AB')
            blk[:] = dot_sequences(aux, mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all the `z` components
        are zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip 'z' component - it must vanish after the in-plane
    # transformation.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()

    # Mapping from transformed element to reference element.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]

    # Fix: honour the `order` argument instead of the hard-coded 1, so the
    # parameter is not silently ignored (callers passing 1 are unaffected).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)

    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    # Restrict the surface connectivity to the first n_ep (vertex) nodes.
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system, with the first
    # face node of each element as the origin.
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the geometry
    # mapping itself is linear (order 1), independently of the field order.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Overwrite the base functions with those of the field approximation
    # order, while keeping the (linear) geometry mapping data.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
    """
    Describe deformation of a thin incompressible 2D membrane in 3D
    space, composed of flat finite element faces.

    The coordinate system of each element (face), i.e. the membrane
    mid-surface, should coincide with the `x`, `y` axes of the `x-y`
    plane.

    Parameters
    ----------
    el_disps : array
        The displacements of element nodes, shape `(n_el, n_ep, dim)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.

    Returns
    -------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition.
    mtx_b : array
        The discrete Green strain variation operator.
    """
    sh = bfg.shape
    n_ep = sh[3]
    dim = el_disps.shape[2]
    # Number of independent in-plane strain components (3 for dim == 3).
    sym2 = dim2sym(dim-1)
    # Repeat el_disps by number of quadrature points.
    el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
    # Transformed (in-plane) displacement gradient with
    # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
    du = dot_sequences(bfg, el_disps_qp)
    # Deformation gradient F w.r.t. in plane coordinates.
    # F_{ia} = dx_i / dX_a,
    # a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
    mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
    # Right Cauchy-Green deformation tensor C.
    # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
    mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
    # C_33 from incompressibility (det(C) = 1 implies
    # C_33 = 1 / (C_11 C_22 - C_12^2)).
    c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
                 - mtx_c[..., 0, 1]**2)
    # Discrete Green strain variation operator; rows correspond to the
    # variations of E_11, E_22, 2 E_12, columns to the component-wise
    # grouped nodal DOFs.
    mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
    # Row 0: delta E_11 terms, one slice per displacement component i.
    mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
    mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
    # Row 1: delta E_22 terms.
    mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
    mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
    # Row 2: delta (2 E_12) terms - symmetric combination of both gradients.
    mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
                                   + bfg[..., 0, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 2:3]
    return mtx_c, c33, mtx_b
def get_tangent_stress_matrix(stress, bfg):
"""
Get the tangent stress matrix of a thin incompressible 2D membrane
in 3D space, given a stress.
Parameters
----------
stress : array
The components `11, 22, 12` of the second Piola-Kirchhoff stress
tensor, shape `(n_el, n_qp, 3, 1)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx : array
The tangent stress matrix, shape `(n_el, n_qp, dim*n_ep, dim*n_ep)`.
"""
n_el, n_qp, dim, n_ep = bfg.shape
dim += 1
mtx = nm.zeros((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)
g1tg1 = dot_sequences(bfg[..., 0:1, :], bfg[..., 0:1, :], 'ATB')
g1tg2 = | dot_sequences(bfg[..., 0:1, :], bfg[..., 1:2, :], 'ATB') | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored in the first face node.
    e1 = coors[:, 1, :] - coors[:, 0, :]
    e2 = coors[:, -1, :] - coors[:, 0, :]

    # Face normal, then an in-plane vector orthogonal to e1.
    nn = nm.cross(e1, e2)
    e2 = nm.cross(nn, e1)

    # Normalize all three directions.
    e1 = e1 / norm(e1)[:, None]
    e2 = e2 / norm(e2)[:, None]
    nn = nn / norm(nn)[:, None]

    # Stack as columns -> the transposed transformation matrix.
    mtx_t = nm.concatenate((e1[:, :, None],
                            e2[:, :, None],
                            nn[:, :, None]), axis=2)

    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, so the vector of node `node`
    # occupies the strided slice node::n_ep.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        sel = slice(node, None, n_ep)
        # Strided view into `out` - assigning to it updates `out` in-place.
        vec = out[:, 0, sel, 0]
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, see :func:`transform_asm_vectors`.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for node_r in range(n_ep):
        rows = slice(node_r, None, n_ep)
        for node_c in range(n_ep):
            cols = slice(node_c, None, n_ep)
            # Strided view into `out`; apply T . B . T^T in two steps.
            blk = out[:, 0, rows, cols]
            aux = dot_sequences(mtx_t, blk, 'AB')
            blk[:] = dot_sequences(aux, mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all the `z` components
        are zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip 'z' component - it must vanish after the in-plane
    # transformation.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()

    # Mapping from transformed element to reference element.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]

    # Fix: honour the `order` argument instead of the hard-coded 1, so the
    # parameter is not silently ignored (callers passing 1 are unaffected).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)

    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    # Restrict the surface connectivity to the first n_ep (vertex) nodes.
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system, with the first
    # face node of each element as the origin.
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the geometry
    # mapping itself is linear (order 1), independently of the field order.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Overwrite the base functions with those of the field approximation
    # order, while keeping the (linear) geometry mapping data.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
    """
    Describe deformation of a thin incompressible 2D membrane in 3D
    space, composed of flat finite element faces.

    The coordinate system of each element (face), i.e. the membrane
    mid-surface, should coincide with the `x`, `y` axes of the `x-y`
    plane.

    Parameters
    ----------
    el_disps : array
        The displacements of element nodes, shape `(n_el, n_ep, dim)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.

    Returns
    -------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition.
    mtx_b : array
        The discrete Green strain variation operator.
    """
    sh = bfg.shape
    n_ep = sh[3]
    dim = el_disps.shape[2]
    # Number of independent in-plane strain components (3 for dim == 3).
    sym2 = dim2sym(dim-1)
    # Repeat el_disps by number of quadrature points.
    el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
    # Transformed (in-plane) displacement gradient with
    # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
    du = dot_sequences(bfg, el_disps_qp)
    # Deformation gradient F w.r.t. in plane coordinates.
    # F_{ia} = dx_i / dX_a,
    # a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
    mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
    # Right Cauchy-Green deformation tensor C.
    # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
    mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
    # C_33 from incompressibility (det(C) = 1 implies
    # C_33 = 1 / (C_11 C_22 - C_12^2)).
    c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
                 - mtx_c[..., 0, 1]**2)
    # Discrete Green strain variation operator; rows correspond to the
    # variations of E_11, E_22, 2 E_12, columns to the component-wise
    # grouped nodal DOFs.
    mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
    # Row 0: delta E_11 terms, one slice per displacement component i.
    mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
    mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
    # Row 1: delta E_22 terms.
    mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
    mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
    # Row 2: delta (2 E_12) terms - symmetric combination of both gradients.
    mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
                                   + bfg[..., 0, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 2:3]
    return mtx_c, c33, mtx_b
def get_tangent_stress_matrix(stress, bfg):
"""
Get the tangent stress matrix of a thin incompressible 2D membrane
in 3D space, given a stress.
Parameters
----------
stress : array
The components `11, 22, 12` of the second Piola-Kirchhoff stress
tensor, shape `(n_el, n_qp, 3, 1)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx : array
The tangent stress matrix, shape `(n_el, n_qp, dim*n_ep, dim*n_ep)`.
"""
n_el, n_qp, dim, n_ep = bfg.shape
dim += 1
mtx = nm.zeros((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)
g1tg1 = dot_sequences(bfg[..., 0:1, :], bfg[..., 0:1, :], 'ATB')
g1tg2 = dot_sequences(bfg[..., 0:1, :], bfg[..., 1:2, :], 'ATB')
g2tg1 = | dot_sequences(bfg[..., 1:2, :], bfg[..., 0:1, :], 'ATB') | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored in the first face node.
    e1 = coors[:, 1, :] - coors[:, 0, :]
    e2 = coors[:, -1, :] - coors[:, 0, :]

    # Face normal, then an in-plane vector orthogonal to e1.
    nn = nm.cross(e1, e2)
    e2 = nm.cross(nn, e1)

    # Normalize all three directions.
    e1 = e1 / norm(e1)[:, None]
    e2 = e2 / norm(e2)[:, None]
    nn = nn / norm(nn)[:, None]

    # Stack as columns -> the transposed transformation matrix.
    mtx_t = nm.concatenate((e1[:, :, None],
                            e2[:, :, None],
                            nn[:, :, None]), axis=2)

    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, so the vector of node `node`
    # occupies the strided slice node::n_ep.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        sel = slice(node, None, n_ep)
        # Strided view into `out` - assigning to it updates `out` in-place.
        vec = out[:, 0, sel, 0]
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, see :func:`transform_asm_vectors`.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for node_r in range(n_ep):
        rows = slice(node_r, None, n_ep)
        for node_c in range(n_ep):
            cols = slice(node_c, None, n_ep)
            # Strided view into `out`; apply T . B . T^T in two steps.
            blk = out[:, 0, rows, cols]
            aux = dot_sequences(mtx_t, blk, 'AB')
            blk[:] = dot_sequences(aux, mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all the `z` components
        are zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip 'z' component - it must vanish after the in-plane
    # transformation.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()

    # Mapping from transformed element to reference element.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]

    # Fix: honour the `order` argument instead of the hard-coded 1, so the
    # parameter is not silently ignored (callers passing 1 are unaffected).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)

    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    # Restrict the surface connectivity to the first n_ep (vertex) nodes.
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system, with the first
    # face node of each element as the origin.
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the geometry
    # mapping itself is linear (order 1), independently of the field order.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Overwrite the base functions with those of the field approximation
    # order, while keeping the (linear) geometry mapping data.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
    """
    Describe deformation of a thin incompressible 2D membrane in 3D
    space, composed of flat finite element faces.

    The coordinate system of each element (face), i.e. the membrane
    mid-surface, should coincide with the `x`, `y` axes of the `x-y`
    plane.

    Parameters
    ----------
    el_disps : array
        The displacements of element nodes, shape `(n_el, n_ep, dim)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.

    Returns
    -------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition.
    mtx_b : array
        The discrete Green strain variation operator.
    """
    sh = bfg.shape
    n_ep = sh[3]
    dim = el_disps.shape[2]
    # Number of independent in-plane strain components (3 for dim == 3).
    sym2 = dim2sym(dim-1)
    # Repeat el_disps by number of quadrature points.
    el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
    # Transformed (in-plane) displacement gradient with
    # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
    du = dot_sequences(bfg, el_disps_qp)
    # Deformation gradient F w.r.t. in plane coordinates.
    # F_{ia} = dx_i / dX_a,
    # a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
    mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
    # Right Cauchy-Green deformation tensor C.
    # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
    mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
    # C_33 from incompressibility (det(C) = 1 implies
    # C_33 = 1 / (C_11 C_22 - C_12^2)).
    c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
                 - mtx_c[..., 0, 1]**2)
    # Discrete Green strain variation operator; rows correspond to the
    # variations of E_11, E_22, 2 E_12, columns to the component-wise
    # grouped nodal DOFs.
    mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
    # Row 0: delta E_11 terms, one slice per displacement component i.
    mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
    mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
    # Row 1: delta E_22 terms.
    mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
    mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
    # Row 2: delta (2 E_12) terms - symmetric combination of both gradients.
    mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
                                   + bfg[..., 0, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 2:3]
    return mtx_c, c33, mtx_b
def get_tangent_stress_matrix(stress, bfg):
"""
Get the tangent stress matrix of a thin incompressible 2D membrane
in 3D space, given a stress.
Parameters
----------
stress : array
The components `11, 22, 12` of the second Piola-Kirchhoff stress
tensor, shape `(n_el, n_qp, 3, 1)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
mtx : array
The tangent stress matrix, shape `(n_el, n_qp, dim*n_ep, dim*n_ep)`.
"""
n_el, n_qp, dim, n_ep = bfg.shape
dim += 1
mtx = nm.zeros((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)
g1tg1 = dot_sequences(bfg[..., 0:1, :], bfg[..., 0:1, :], 'ATB')
g1tg2 = dot_sequences(bfg[..., 0:1, :], bfg[..., 1:2, :], 'ATB')
g2tg1 = dot_sequences(bfg[..., 1:2, :], bfg[..., 0:1, :], 'ATB')
g2tg2 = | dot_sequences(bfg[..., 1:2, :], bfg[..., 1:2, :], 'ATB') | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors anchored in the first face node.
    e1 = coors[:, 1, :] - coors[:, 0, :]
    e2 = coors[:, -1, :] - coors[:, 0, :]

    # Face normal, then an in-plane vector orthogonal to e1.
    nn = nm.cross(e1, e2)
    e2 = nm.cross(nn, e1)

    # Normalize all three directions.
    e1 = e1 / norm(e1)[:, None]
    e2 = e2 / norm(e2)[:, None]
    nn = nn / norm(nn)[:, None]

    # Stack as columns -> the transposed transformation matrix.
    mtx_t = nm.concatenate((e1[:, :, None],
                            e2[:, :, None],
                            nn[:, :, None]), axis=2)

    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, so the vector of node `node`
    # occupies the strided slice node::n_ep.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for node in range(n_ep):
        sel = slice(node, None, n_ep)
        # Strided view into `out` - assigning to it updates `out` in-place.
        vec = out[:, 0, sel, 0]
        vec[:] = dot_sequences(mtx_t, vec, 'AB')
def transform_asm_matrices(out, mtx_t):
    """
    Transform matrix assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of matrices, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are grouped component-wise, see :func:`transform_asm_vectors`.
    n_ep = out.shape[-1] // mtx_t.shape[-1]
    for node_r in range(n_ep):
        rows = slice(node_r, None, n_ep)
        for node_c in range(n_ep):
            cols = slice(node_c, None, n_ep)
            # Strided view into `out`; apply T . B . T^T in two steps.
            blk = out[:, 0, rows, cols]
            aux = dot_sequences(mtx_t, blk, 'AB')
            blk[:] = dot_sequences(aux, mtx_t, 'ABT')
def create_mapping(coors, gel, order):
    """
    Create mapping from transformed (in `x-y` plane) element faces to
    reference element faces.

    Parameters
    ----------
    coors : array
        The transformed coordinates of element nodes, shape `(n_el,
        n_ep, dim)`. The function verifies that all the `z` components
        are zero.
    gel : GeometryElement instance
        The geometry element corresponding to the faces.
    order : int
        The polynomial order of the mapping.

    Returns
    -------
    mapping : VolumeMapping instance
        The reference element face mapping.
    """
    # Strip 'z' component - it must vanish after the in-plane
    # transformation.
    assert_(nm.allclose(coors[:, :, -1], 0.0, rtol=1e-12, atol=1e-12))
    coors = coors[:, :, :-1].copy()

    # Mapping from transformed element to reference element.
    sh = coors.shape
    seq_coors = coors.reshape((sh[0] * sh[1], sh[2]))
    seq_conn = nm.arange(seq_coors.shape[0], dtype=nm.int32)
    seq_conn.shape = sh[:2]

    # Fix: honour the `order` argument instead of the hard-coded 1, so the
    # parameter is not silently ignored (callers passing 1 are unaffected).
    mapping = VolumeMapping(seq_coors, seq_conn, gel=gel, order=order)

    return mapping
def describe_geometry(field, region, integral):
    """
    Describe membrane geometry in a given region.

    Parameters
    ----------
    field : Field instance
        The field defining the FE approximation.
    region : Region instance
        The surface region to describe.
    integral : Integral instance
        The integral defining the quadrature points.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    membrane_geo : CMapping instance
        The mapping from transformed elements to a reference elements.
    """
    # Coordinates of element vertices.
    sg, _ = field.get_mapping(region, integral, 'surface')
    sd = field.surface_data[region.name]
    # Restrict the surface connectivity to the first n_ep (vertex) nodes.
    coors = field.coors[sd.econn[:, :sg.n_ep]]
    # Coordinate transformation matrix (transposed!).
    mtx_t = create_transformation_matrix(coors)
    # Transform coordinates to the local coordinate system, with the first
    # face node of each element as the origin.
    coors_loc = dot_sequences((coors - coors[:, 0:1, :]), mtx_t)
    # Mapping from transformed elements to reference elements - the geometry
    # mapping itself is linear (order 1), independently of the field order.
    gel = field.gel.surface_facet
    vm = create_mapping(coors_loc, gel, 1)
    qp = integral.get_qp(gel.name)
    ps = PolySpace.any_from_args(None, gel, field.approx_order)
    membrane_geo = vm.get_mapping(qp[0], qp[1], poly_space=ps)
    # Overwrite the base functions with those of the field approximation
    # order, while keeping the (linear) geometry mapping data.
    membrane_geo.bf[:] = ps.eval_base(qp[0])
    return mtx_t, membrane_geo
def describe_deformation(el_disps, bfg):
    """
    Describe deformation of a thin incompressible 2D membrane in 3D
    space, composed of flat finite element faces.

    The coordinate system of each element (face), i.e. the membrane
    mid-surface, should coincide with the `x`, `y` axes of the `x-y`
    plane.

    Parameters
    ----------
    el_disps : array
        The displacements of element nodes, shape `(n_el, n_ep, dim)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.

    Returns
    -------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition.
    mtx_b : array
        The discrete Green strain variation operator.
    """
    sh = bfg.shape
    n_ep = sh[3]
    dim = el_disps.shape[2]
    # Number of independent in-plane strain components (2D -> 3).
    sym2 = dim2sym(dim-1)
    # Repeat el_disps by number of quadrature points.
    el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
    # Transformed (in-plane) displacement gradient with
    # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
    du = dot_sequences(bfg, el_disps_qp)
    # Deformation gradient F w.r.t. in plane coordinates.
    # F_{ia} = dx_i / dX_a,
    # a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
    mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
    # Right Cauchy-Green deformation tensor C.
    # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
    mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
    # C_33 from incompressibility: reciprocal of the determinant of the
    # in-plane 2x2 block of C.
    c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
                 - mtx_c[..., 0, 1]**2)
    # Discrete Green strain variation operator; row layout follows the
    # symmetric storage (11, 22, 12), columns are grouped per
    # displacement component (n_ep columns each).
    mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
    # Row 0: variation of the 11 strain component.
    mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
    mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
    # Row 1: variation of the 22 strain component.
    mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
    mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
    # Row 2: variation of the mixed (12, shear) component - symmetric
    # combination of the two gradient rows.
    mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
                                   + bfg[..., 0, :] * mtx_f[..., 1, 0:1]
    mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 1:2]
    mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
                                   + bfg[..., 1, :] * mtx_f[..., 0, 2:3]
    return mtx_c, c33, mtx_b
def get_tangent_stress_matrix(stress, bfg):
    """
    Get the tangent stress matrix of a thin incompressible 2D membrane
    in 3D space, given a stress.

    Parameters
    ----------
    stress : array
        The components `11, 22, 12` of the second Piola-Kirchhoff stress
        tensor, shape `(n_el, n_qp, 3, 1)`.
    bfg : array
        The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
        n_ep)`.

    Returns
    -------
    mtx : array
        The tangent stress matrix, shape `(n_el, n_qp, dim*n_ep, dim*n_ep)`.
    """
    n_el, n_qp, dim, n_ep = bfg.shape
    dim += 1  # bfg gradients are in-plane (2D); the membrane lives in 3D.

    mtx = nm.zeros((n_el, n_qp, dim * n_ep, dim * n_ep), dtype=nm.float64)

    # The two in-plane gradient rows.
    g1 = bfg[..., 0:1, :]
    g2 = bfg[..., 1:2, :]

    # S_11 g1^T g1 + S_12 g1^T g2 + S_12 g2^T g1 + S_22 g2^T g2.
    aux = stress[..., 0:1, :] * dot_sequences(g1, g1, 'ATB') \
          + stress[..., 2:3, :] * dot_sequences(g1, g2, 'ATB') \
          + stress[..., 2:3, :] * dot_sequences(g2, g1, 'ATB') \
          + stress[..., 1:2, :] * dot_sequences(g2, g2, 'ATB')

    # The same block appears on the diagonal for each displacement
    # component.
    for ic in range(3):
        sl = slice(ic * n_ep, (ic + 1) * n_ep)
        mtx[..., sl, sl] = aux

    return mtx
def get_invariants(mtx_c, c33):
    """
    Get the first and second invariants of the right Cauchy-Green
    deformation tensor describing deformation of an incompressible
    membrane.

    Parameters
    ----------
    mtx_c : array
        The in-plane right Cauchy-Green deformation tensor
        :math:`C_{ij}`, :math:`i, j = 1, 2`, shape `(n_el, n_qp, dim-1,
        dim-1)`.
    c33 : array
        The component :math:`C_{33}` computed from the incompressibility
        condition, shape `(n_el, n_qp)`.

    Returns
    -------
    i1 : array
        The first invariant of :math:`C_{ij}`.
    i2 : array
        The second invariant of :math:`C_{ij}`.
    """
    c11 = mtx_c[..., 0, 0]
    c12 = mtx_c[..., 0, 1]
    c22 = mtx_c[..., 1, 1]

    # I_1 = tr(C), with C_33 appended to the in-plane block.
    i1 = c11 + c22 + c33

    # I_2 = sum of the principal 2x2 minors (C_13 = C_23 = 0).
    i2 = c11 * c22 + c22 * c33 + c11 * c33 - c12**2

    return i1, i2
def get_green_strain_sym3d(mtx_c, c33):
r"""
Get the 3D Green strain tensor in symmetric storage.
Parameters
----------
mtx_c ; array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`, shape `(n_el, n_qp, dim-1,
dim-1)`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition, shape `(n_el, n_qp)`.
Returns
-------
mtx_e : array
The membrane Green strain :math:`E_{ij} = \frac{1}{2} (C_{ij}) -
\delta_{ij}`, symmetric storage: items (11, 22, 33, 12, 13, 23),
shape `(n_el, n_qp, sym, 1)`.
"""
n_el, n_qp, dm, _ = mtx_c.shape
dim = dm + 1
sym = | dim2sym(dim) | sfepy.mechanics.tensors.dim2sym |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors from the first face node to its two neighbours.
    t1 = coors[:, 1, :] - coors[:, 0, :]
    t2 = coors[:, -1, :] - coors[:, 0, :]

    # Face normal, then re-orthogonalize t2 so that (t1, t2, n) forms an
    # orthogonal triplet.
    n = nm.cross(t1, t2)
    t2 = nm.cross(n, t1)

    # Normalize all three directions (in place - each is a fresh array).
    for vec in (t1, t2, n):
        vec /= norm(vec)[:, None]

    # Stack as columns: T = [t_1, t_2, n].
    mtx_t = nm.concatenate((t1[:, :, None],
                            t2[:, :, None],
                            n[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
"""
Transform vector assembling contributions to global coordinate system, one
node at a time.
Parameters
----------
out : array
The array of vectors, transformed in-place.
mtx_t : array
The transposed transformation matrix :math:`T`, see
:func:`create_transformation_matrix`.
"""
n_ep = out.shape[2] // mtx_t.shape[2]
for iep in range(n_ep):
ir = slice(iep, None, n_ep)
fn = out[:, 0, ir, 0]
fn[:] = | dot_sequences(mtx_t, fn, 'AB') | sfepy.linalg.dot_sequences |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
"""
Create a transposed coordinate transformation matrix, that
transforms 3D coordinates of element face nodes so that the
transformed nodes are in the `x-y` plane. The rotation is performed
w.r.t. the first node of each face.
Parameters
----------
coors : array
The coordinates of element nodes, shape `(n_el, n_ep, dim)`.
Returns
-------
mtx_t : array
The transposed transformation matrix :math:`T`, i.e.
:math:`X_{inplane} = T^T X_{3D}`.
Notes
-----
:math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
in-plane (column) vectors and :math:`n` is the unit normal vector,
all mutually orthonormal.
"""
# Local coordinate system.
t1 = coors[:, 1, :] - coors[:, 0, :]
t2 = coors[:, -1, :] - coors[:, 0, :]
n = nm.cross(t1, t2)
t2 = nm.cross(n, t1)
t1 = t1 / | norm(t1) | sfepy.linalg.norm_l2_along_axis |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
"""
Create a transposed coordinate transformation matrix, that
transforms 3D coordinates of element face nodes so that the
transformed nodes are in the `x-y` plane. The rotation is performed
w.r.t. the first node of each face.
Parameters
----------
coors : array
The coordinates of element nodes, shape `(n_el, n_ep, dim)`.
Returns
-------
mtx_t : array
The transposed transformation matrix :math:`T`, i.e.
:math:`X_{inplane} = T^T X_{3D}`.
Notes
-----
:math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
in-plane (column) vectors and :math:`n` is the unit normal vector,
all mutually orthonormal.
"""
# Local coordinate system.
t1 = coors[:, 1, :] - coors[:, 0, :]
t2 = coors[:, -1, :] - coors[:, 0, :]
n = nm.cross(t1, t2)
t2 = nm.cross(n, t1)
t1 = t1 / norm(t1)[:, None]
t2 = t2 / | norm(t2) | sfepy.linalg.norm_l2_along_axis |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
"""
Create a transposed coordinate transformation matrix, that
transforms 3D coordinates of element face nodes so that the
transformed nodes are in the `x-y` plane. The rotation is performed
w.r.t. the first node of each face.
Parameters
----------
coors : array
The coordinates of element nodes, shape `(n_el, n_ep, dim)`.
Returns
-------
mtx_t : array
The transposed transformation matrix :math:`T`, i.e.
:math:`X_{inplane} = T^T X_{3D}`.
Notes
-----
:math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
in-plane (column) vectors and :math:`n` is the unit normal vector,
all mutually orthonormal.
"""
# Local coordinate system.
t1 = coors[:, 1, :] - coors[:, 0, :]
t2 = coors[:, -1, :] - coors[:, 0, :]
n = nm.cross(t1, t2)
t2 = nm.cross(n, t1)
t1 = t1 / norm(t1)[:, None]
t2 = t2 / norm(t2)[:, None]
n = n / | norm(n) | sfepy.linalg.norm_l2_along_axis |
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import norm_l2_along_axis as norm
from sfepy.linalg import dot_sequences, insert_strided_axis
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.mappings import VolumeMapping
from sfepy.mechanics.tensors import dim2sym
from six.moves import range
def create_transformation_matrix(coors):
    """
    Create a transposed coordinate transformation matrix, that
    transforms 3D coordinates of element face nodes so that the
    transformed nodes are in the `x-y` plane. The rotation is performed
    w.r.t. the first node of each face.

    Parameters
    ----------
    coors : array
        The coordinates of element nodes, shape `(n_el, n_ep, dim)`.

    Returns
    -------
    mtx_t : array
        The transposed transformation matrix :math:`T`, i.e.
        :math:`X_{inplane} = T^T X_{3D}`.

    Notes
    -----
    :math:`T = [t_1, t_2, n]`, where :math:`t_1`, :math:`t_2`, are unit
    in-plane (column) vectors and :math:`n` is the unit normal vector,
    all mutually orthonormal.
    """
    # Edge vectors from the first face node to its two neighbours.
    t1 = coors[:, 1, :] - coors[:, 0, :]
    t2 = coors[:, -1, :] - coors[:, 0, :]

    # Face normal, then re-orthogonalize t2 so that (t1, t2, n) forms an
    # orthogonal triplet.
    n = nm.cross(t1, t2)
    t2 = nm.cross(n, t1)

    # Normalize all three directions (in place - each is a fresh array).
    for vec in (t1, t2, n):
        vec /= norm(vec)[:, None]

    # Stack as columns: T = [t_1, t_2, n].
    mtx_t = nm.concatenate((t1[:, :, None],
                            t2[:, :, None],
                            n[:, :, None]), axis=2)
    return mtx_t
def transform_asm_vectors(out, mtx_t):
    """
    Transform vector assembling contributions to global coordinate system, one
    node at a time.

    Parameters
    ----------
    out : array
        The array of vectors, transformed in-place.
    mtx_t : array
        The transposed transformation matrix :math:`T`, see
        :func:`create_transformation_matrix`.
    """
    # DOFs are interleaved per component: node iep occupies rows
    # iep, iep + n_ep, iep + 2*n_ep.
    n_ep = out.shape[2] // mtx_t.shape[2]
    for iep in range(n_ep):
        rows = slice(iep, None, n_ep)
        nodal = out[:, 0, rows, 0]
        nodal[:] = dot_sequences(mtx_t, nodal, 'AB')
def transform_asm_matrices(out, mtx_t):
"""
Transform matrix assembling contributions to global coordinate system, one
node at a time.
Parameters
----------
out : array
The array of matrices, transformed in-place.
mtx_t : array
The transposed transformation matrix :math:`T`, see
:func:`create_transformation_matrix`.
"""
n_ep = out.shape[-1] // mtx_t.shape[-1]
for iepr in range(n_ep):
ir = slice(iepr, None, n_ep)
for iepc in range(n_ep):
ic = slice(iepc, None, n_ep)
fn = out[:, 0, ir, ic]
fn[:] = dot_sequences( | dot_sequences(mtx_t, fn, 'AB') | sfepy.linalg.dot_sequences |
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete.common import Field
import sfepy.discrete.common.global_interp as gi
from sfepy.base.testing import TestCommon
class Test(TestCommon):
    @staticmethod
    def from_conf(conf, options):
        # Standard constructor hook used by the sfepy test runner.
        test = Test(conf=conf, options=options)
        return test
def test_ref_coors_fem(self):
from sfepy.discrete.fem import Mesh, FEDomain
mesh = Mesh.from_file('meshes/3d/special/cross3d.mesh',
prefix_dir=sfepy.data_dir)
domain = | FEDomain('domain', mesh) | sfepy.discrete.fem.FEDomain |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
"""
Read probing results from a file.
Parameters
----------
filename : str or file object
The probe results file name.
Returns
-------
header : Struct instance
The probe data header.
results : dict
The dictionary of probing results. Keys are data names, values are
the probed values.
"""
from sfepy.base.ioutils import read_array
only_names = | get_default(only_names, []) | sfepy.base.base.get_default |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
    """
    Read probing results from a file.

    Parameters
    ----------
    filename : str or file object
        The probe results file name.
    only_names : list of str, optional
        If given, read only the named data. By default, all data are read.

    Returns
    -------
    header : Struct instance
        The probe data header.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    from sfepy.base.ioutils import read_array

    fd = open(filename, 'r') if isinstance(filename, basestr) else filename
    header = read_header(fd)

    results = {}
    for name, nc in get_data_name(fd):
        # Bug fix: a None/empty `only_names` used to be normalized to []
        # and the `name not in []` filter then skipped all the data.
        if only_names and (name not in only_names): continue
        # One parametrization column + nc data columns.
        result = read_array(fd, header.n_point, nc + 1, nm.float64)
        results[name] = result

    return header, results
def read_header(fd):
"""
Read the probe data header from file descriptor fd.
Returns
-------
header : Struct instance
The probe data header.
"""
header = | Struct(name='probe_data_header') | sfepy.base.base.Struct |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
    """
    Read probing results from a file.

    Parameters
    ----------
    filename : str or file object
        The probe results file name.
    only_names : list of str, optional
        If given, read only the named data. By default, all data are read.

    Returns
    -------
    header : Struct instance
        The probe data header.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    from sfepy.base.ioutils import read_array

    fd = open(filename, 'r') if isinstance(filename, basestr) else filename
    header = read_header(fd)

    results = {}
    for name, nc in get_data_name(fd):
        # Bug fix: a None/empty `only_names` used to be normalized to []
        # and the `name not in []` filter then skipped all the data.
        if only_names and (name not in only_names): continue
        # One parametrization column + nc data columns.
        result = read_array(fd, header.n_point, nc + 1, nm.float64)
        results[name] = result

    return header, results
def read_header(fd):
    """
    Read the probe data header from file descriptor fd.

    Returns
    -------
    header : Struct instance
        The probe data header.
    """
    header = Struct(name='probe_data_header')

    # First line: the probe class name.
    header.probe_class = fd.readline().strip()

    # Second line: '<label>: <n_point> ...'.
    n_point_field = fd.readline().strip().split(':')[1]
    header.n_point = int(n_point_field.split()[0])

    # Free-form detail lines, terminated by '-----'.
    details = []
    while True:
        line = fd.readline().strip()
        if line == '-----':
            break
        details.append(line)
    header.details = '\n'.join(details)

    return header
def get_data_name(fd):
    """
    Try to read next data name in file fd.

    Yields
    ------
    name : str
        The data name.
    nc : int
        The number of data columns.
    """
    while 1:
        try:
            line = fd.readline()
            if (len(line) == 0): break
            if len(line) == 1: continue
        except Exception:
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; just end the generator instead.
            return

        line = line.strip().split()
        # Data lines have the form '# <name> <n_columns>'.
        if (len(line) == 3) and (line[0] == '#'):
            name = line[1]
            nc = int(line[2])
            yield name, nc
class Probe(Struct):
"""
Base class for all point probes. Enforces two points minimum.
"""
cache = | Struct(name='probe_shared_evaluate_cache') | sfepy.base.base.Struct |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
"""
Read probing results from a file.
Parameters
----------
filename : str or file object
The probe results file name.
Returns
-------
header : Struct instance
The probe data header.
results : dict
The dictionary of probing results. Keys are data names, values are
the probed values.
"""
from sfepy.base.ioutils import read_array
only_names = get_default(only_names, [])
fd = open(filename, 'r') if isinstance(filename, basestr) else filename
header = read_header(fd)
results = {}
for name, nc in get_data_name(fd):
if name not in only_names: continue
result = | read_array(fd, header.n_point, nc + 1, nm.float64) | sfepy.base.ioutils.read_array |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
    """
    Read probing results from a file.

    Parameters
    ----------
    filename : str or file object
        The probe results file name.
    only_names : list of str, optional
        If given, read only the named data. By default, all data are read.

    Returns
    -------
    header : Struct instance
        The probe data header.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    from sfepy.base.ioutils import read_array

    fd = open(filename, 'r') if isinstance(filename, basestr) else filename
    header = read_header(fd)

    results = {}
    for name, nc in get_data_name(fd):
        # Bug fix: a None/empty `only_names` used to be normalized to []
        # and the `name not in []` filter then skipped all the data.
        if only_names and (name not in only_names): continue
        # One parametrization column + nc data columns.
        result = read_array(fd, header.n_point, nc + 1, nm.float64)
        results[name] = result

    return header, results
def read_header(fd):
    """
    Read the probe data header from file descriptor fd.

    Returns
    -------
    header : Struct instance
        The probe data header.
    """
    header = Struct(name='probe_data_header')

    # First line: the probe class name.
    header.probe_class = fd.readline().strip()

    # Second line: '<label>: <n_point> ...'.
    n_point_field = fd.readline().strip().split(':')[1]
    header.n_point = int(n_point_field.split()[0])

    # Free-form detail lines, terminated by '-----'.
    details = []
    while True:
        line = fd.readline().strip()
        if line == '-----':
            break
        details.append(line)
    header.details = '\n'.join(details)

    return header
def get_data_name(fd):
    """
    Try to read next data name in file fd.

    Yields
    ------
    name : str
        The data name.
    nc : int
        The number of data columns.
    """
    while 1:
        try:
            line = fd.readline()
            if (len(line) == 0): break
            if len(line) == 1: continue
        except Exception:
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; just end the generator instead.
            return

        line = line.strip().split()
        # Data lines have the form '# <name> <n_columns>'.
        if (len(line) == 3) and (line[0] == '#'):
            name = line[1]
            nc = int(line[2])
            yield name, nc
class Probe(Struct):
"""
Base class for all point probes. Enforces two points minimum.
"""
cache = Struct(name='probe_shared_evaluate_cache')
is_cyclic = False
def __init__(self, name, share_geometry=True, n_point=None, **kwargs):
"""
Parameters
----------
name : str
The probe name, set automatically by the subclasses.
share_geometry : bool
Set to True to indicate that all the probes will work on the same
domain. Certain data are then computed only for the first probe and
cached.
n_point : int
The (fixed) number of probe points, when positive. When non-positive,
the number of points is adaptively increased starting from -n_point,
until the neighboring point distance is less than the diameter of the
elements enclosing the points. When None, it is set to -10.
For additional parameters see the __init__() docstrings of the
subclasses.
"""
Struct.__init__(self, name=name, share_geometry=share_geometry,
**kwargs)
self.set_n_point(n_point)
self.options = | Struct(close_limit=0.1, size_hint=None) | sfepy.base.base.Struct |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
    """
    Read probing results from a file.

    Parameters
    ----------
    filename : str or file object
        The probe results file name.
    only_names : list of str, optional
        If given, read only the named data. By default, all data are read.

    Returns
    -------
    header : Struct instance
        The probe data header.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    from sfepy.base.ioutils import read_array

    fd = open(filename, 'r') if isinstance(filename, basestr) else filename
    header = read_header(fd)

    results = {}
    for name, nc in get_data_name(fd):
        # Bug fix: a None/empty `only_names` used to be normalized to []
        # and the `name not in []` filter then skipped all the data.
        if only_names and (name not in only_names): continue
        # One parametrization column + nc data columns.
        result = read_array(fd, header.n_point, nc + 1, nm.float64)
        results[name] = result

    return header, results
def read_header(fd):
    """
    Read the probe data header from file descriptor fd.

    Returns
    -------
    header : Struct instance
        The probe data header.
    """
    header = Struct(name='probe_data_header')

    # First line: the probe class name.
    header.probe_class = fd.readline().strip()

    # Second line: '<label>: <n_point> ...'.
    n_point_field = fd.readline().strip().split(':')[1]
    header.n_point = int(n_point_field.split()[0])

    # Free-form detail lines, terminated by '-----'.
    details = []
    while True:
        line = fd.readline().strip()
        if line == '-----':
            break
        details.append(line)
    header.details = '\n'.join(details)

    return header
def get_data_name(fd):
    """
    Try to read next data name in file fd.

    Yields
    ------
    name : str
        The data name.
    nc : int
        The number of data columns.
    """
    while 1:
        try:
            line = fd.readline()
            if (len(line) == 0): break
            if len(line) == 1: continue
        except Exception:
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; just end the generator instead.
            return

        line = line.strip().split()
        # Data lines have the form '# <name> <n_columns>'.
        if (len(line) == 3) and (line[0] == '#'):
            name = line[1]
            nc = int(line[2])
            yield name, nc
class Probe(Struct):
"""
Base class for all point probes. Enforces two points minimum.
"""
cache = Struct(name='probe_shared_evaluate_cache')
is_cyclic = False
def __init__(self, name, share_geometry=True, n_point=None, **kwargs):
"""
Parameters
----------
name : str
The probe name, set automatically by the subclasses.
share_geometry : bool
Set to True to indicate that all the probes will work on the same
domain. Certain data are then computed only for the first probe and
cached.
n_point : int
The (fixed) number of probe points, when positive. When non-positive,
the number of points is adaptively increased starting from -n_point,
until the neighboring point distance is less than the diameter of the
elements enclosing the points. When None, it is set to -10.
For additional parameters see the __init__() docstrings of the
subclasses.
"""
Struct.__init__(self, name=name, share_geometry=share_geometry,
**kwargs)
self.set_n_point(n_point)
self.options = Struct(close_limit=0.1, size_hint=None)
self.cache = | Struct(name='probe_local_evaluate_cache') | sfepy.base.base.Struct |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    # Open the output only when a file name was given; otherwise the
    # caller supplied (and owns) the file object.
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))
        # The probe parametrization goes into the first column.
        columns = vals[:, None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:, None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
    """
    Read probing results from a file.

    Parameters
    ----------
    filename : str or file object
        The probe results file name.
    only_names : list of str, optional
        If given, read only the named data. By default, all data are read.

    Returns
    -------
    header : Struct instance
        The probe data header.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    from sfepy.base.ioutils import read_array

    fd = open(filename, 'r') if isinstance(filename, basestr) else filename
    header = read_header(fd)

    results = {}
    for name, nc in get_data_name(fd):
        # Bug fix: a None/empty `only_names` used to be normalized to []
        # and the `name not in []` filter then skipped all the data.
        if only_names and (name not in only_names): continue
        # One parametrization column + nc data columns.
        result = read_array(fd, header.n_point, nc + 1, nm.float64)
        results[name] = result

    return header, results
def read_header(fd):
    """
    Read the probe data header from file descriptor fd.

    Returns
    -------
    header : Struct instance
        The probe data header.
    """
    header = Struct(name='probe_data_header')

    # First line: the probe class name.
    header.probe_class = fd.readline().strip()

    # Second line: '<label>: <n_point> ...'.
    n_point_field = fd.readline().strip().split(':')[1]
    header.n_point = int(n_point_field.split()[0])

    # Free-form detail lines, terminated by '-----'.
    details = []
    while True:
        line = fd.readline().strip()
        if line == '-----':
            break
        details.append(line)
    header.details = '\n'.join(details)

    return header
def get_data_name(fd):
    """
    Try to read next data name in file fd.

    Yields
    ------
    name : str
        The data name.
    nc : int
        The number of data columns.
    """
    while 1:
        try:
            line = fd.readline()
            if (len(line) == 0): break
            if len(line) == 1: continue
        except Exception:
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; just end the generator instead.
            return

        line = line.strip().split()
        # Data lines have the form '# <name> <n_columns>'.
        if (len(line) == 3) and (line[0] == '#'):
            name = line[1]
            nc = int(line[2])
            yield name, nc
class Probe(Struct):
"""
Base class for all point probes. Enforces two points minimum.
"""
cache = Struct(name='probe_shared_evaluate_cache')
is_cyclic = False
def __init__(self, name, share_geometry=True, n_point=None, **kwargs):
    """
    Parameters
    ----------
    name : str
        The probe name, set automatically by the subclasses.
    share_geometry : bool
        Set to True to indicate that all the probes will work on the same
        domain. Certain data are then computed only for the first probe and
        cached.
    n_point : int
       The (fixed) number of probe points, when positive. When non-positive,
       the number of points is adaptively increased starting from -n_point,
       until the neighboring point distance is less than the diameter of the
       elements enclosing the points. When None, it is set to -10.

    For additional parameters see the __init__() docstrings of the
    subclasses.
    """
    Struct.__init__(self, name=name, share_geometry=share_geometry,
                    **kwargs)
    self.set_n_point(n_point)

    # Defaults; can be overridden later via set_options().
    self.options = Struct(close_limit=0.1, size_hint=None)

    # Per-instance cache, used only when share_geometry is False.
    self.cache = Struct(name='probe_local_evaluate_cache')

    self.is_refined = False
def get_evaluate_cache(self):
    """
    Return the evaluate cache for domain-related data given by
    `self.share_geometry`.
    """
    # The class-level cache is shared by all probes on the same domain.
    return Probe.cache if self.share_geometry else self.cache
def set_n_point(self, n_point):
    """
    Set the number of probe points.

    Parameters
    ----------
    n_point : int
       The (fixed) number of probe points, when positive. When non-positive,
       the number of points is adaptively increased starting from -n_point,
       until the neighboring point distance is less than the diameter of the
       elements enclosing the points. When None, it is set to -10.
    """
    if n_point is None:
        n_point = -10

    if n_point <= 0:
        # Adaptive mode: -n_point is the initial guess; enforce the two
        # points minimum. n_point_required == -1 marks the adaptive mode.
        n_point = max(-n_point, 2)
        self.n_point_required = -1

    else:
        # Fixed mode: enforce the two points minimum.
        n_point = max(n_point, 2)
        self.n_point_required = n_point

    # n_point0 remembers the initial value for reset_refinement().
    self.n_point0 = self.n_point = n_point
def set_options(self, close_limit=None, size_hint=None):
    """
    Set the probe options.

    Parameters
    ----------
    close_limit : float
        The maximum limit distance of a point from the closest
        element allowed for extrapolation.
    size_hint : float
        Element size hint for the refinement of probe parametrization.
    """
    # Only the options actually given (not None) are updated.
    if close_limit is not None:
        self.options.close_limit = close_limit
    if size_hint is not None:
        self.options.size_hint = size_hint
def report(self):
    """Report the probe parameters as a list of lines."""
    out = [self.__class__.__name__]
    if self.n_point_required == -1:
        aux = 'adaptive'
    else:
        aux = 'fixed'
    out.append('number of points: %s (%s)' % (self.n_point, aux))

    return out
def __call__(self, variable, **kwargs):
    """
    Probe the given variable. The actual implementation is in self.probe(),
    so that it can be overridden in subclasses.

    Parameters
    ----------
    variable : Variable instance
        The variable to be sampled along the probe.
    **kwargs : additional arguments
        See :func:`Probe.probe()`.
    """
    return self.probe(variable, **kwargs)
def probe(self, variable, mode='val', ret_points=False):
    """
    Probe the given variable.

    Parameters
    ----------
    variable : Variable instance
        The variable to be sampled along the probe.
    mode : {'val', 'grad'}, optional
        The evaluation mode: the variable value (default) or the
        variable value gradient.
    ret_points : bool
        If True, return also the probe points.

    Returns
    -------
    pars : array
       The parametrization of the probe points.
    points : array, optional
       If `ret_points` is True, the coordinates of points corresponding to
       `pars`, where the `variable` is evaluated.
    vals : array
       The probed values.
    """
    refine_flag = None

    ev = variable.evaluate_at
    field = variable.field
    cache = field.get_evaluate_cache(cache=self.get_evaluate_cache(),
                                     share_geometry=self.share_geometry)
    self.reset_refinement()

    # Adaptive refinement loop: evaluate, then refine the probe
    # parametrization until no interval needs refining (or the number of
    # points is fixed - see refine_points()).
    while True:
        pars, points = self.get_points(refine_flag)
        if not nm.isfinite(points).all():
            raise ValueError('Inf/nan in probe points!')

        vals, cells = ev(points, mode=mode, strategy='general',
                         close_limit=self.options.close_limit,
                         cache=cache, ret_cells=True)

        if self.is_refined:
            break

        else:
            refine_flag = self.refine_points(variable, points, cells)
            if (refine_flag == False).all():
                break

    self.is_refined = True

    if ret_points:
        return pars, points, vals

    else:
        return pars, vals
def reset_refinement(self):
    """
    Reset the probe refinement state.
    """
    self.is_refined = False
    # Restore the initial number of points remembered by set_n_point().
    self.n_point = self.n_point0
def refine_points(self, variable, points, cells):
"""
Mark intervals between points for a refinement, based on element
sizes at those points. Assumes the points to be ordered.
Returns
-------
refine_flag : bool array
True at places corresponding to intervals between subsequent points
that need to be refined.
"""
if self.n_point_required == self.n_point:
refine_flag = nm.array([False])
else:
if self.options.size_hint is None:
ed = variable.get_element_diameters(cells, 0)
pd = 0.5 * (ed[1:] + ed[:-1])
else:
pd = self.options.size_hint
dist = | norm_l2_along_axis(points[1:] - points[:-1]) | sfepy.linalg.norm_l2_along_axis |
"""Classes for probing values of Variables, for example, along a line."""
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import get_default, basestr, Struct
from sfepy.linalg import make_axis_rotation_matrix, norm_l2_along_axis
import six
def write_results(filename, probe, results):
    """
    Write probing results into a file.

    Parameters
    ----------
    filename : str or file object
        The output file name.
    probe : Probe subclass instance
        The probe used to obtain the results.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'w') if opened_here else filename

    fd.write('\n'.join(probe.report()) + '\n')

    for key, (pars, vals) in six.iteritems(results):
        fd.write('\n# %s %d\n' % (key, vals.shape[-1]))

        # Always save a 2D array: the parametrization in the first column,
        # the probed values in the remaining column(s).
        columns = vals[:,None] if vals.ndim == 1 else vals
        nm.savetxt(fd, nm.hstack((pars[:,None], columns)))

    if opened_here:
        fd.close()
def read_results(filename, only_names=None):
    """
    Read probing results from a file.

    Parameters
    ----------
    filename : str or file object
        The probe results file name.
    only_names : list of str, optional
        If given, read only the named data. By default, all data are read.

    Returns
    -------
    header : Struct instance
        The probe data header.
    results : dict
        The dictionary of probing results. Keys are data names, values are
        the probed values.
    """
    from sfepy.base.ioutils import read_array

    opened_here = isinstance(filename, basestr)
    fd = open(filename, 'r') if opened_here else filename
    try:
        header = read_header(fd)

        results = {}
        for name, nc in get_data_name(fd):
            # An empty/None only_names means "read everything" - testing
            # `name not in []` would silently skip all data.
            if only_names and (name not in only_names): continue

            # The stored array has the parametrization column plus nc value
            # columns.
            results[name] = read_array(fd, header.n_point, nc + 1, nm.float64)

    finally:
        # Close the file only when it was opened here, mirroring
        # write_results().
        if opened_here:
            fd.close()

    return header, results
def read_header(fd):
    """
    Read the probe data header from file descriptor fd.

    Returns
    -------
    header : Struct instance
        The probe data header.
    """
    header = Struct(name='probe_data_header')
    header.probe_class = fd.readline().strip()

    # The second line has the form '<label>: <n_point> (<mode>)'.
    n_point_field = fd.readline().strip().split(':')[1]
    header.n_point = int(n_point_field.strip().split()[0])

    # Everything up to the '-----' separator belongs to the details.
    details = []
    while True:
        line = fd.readline().strip()
        if line == '-----':
            break
        details.append(line)
    header.details = '\n'.join(details)

    return header
def get_data_name(fd):
    """
    Try to read next data name in file fd.

    Data names are on comment lines of the form '# <name> <nc>'.

    Yields
    ------
    name : str
        The data name.
    nc : int
        The number of data columns.
    """
    while 1:
        try:
            line = fd.readline()
        except Exception:
            # PEP 479: a generator must return instead of raising
            # StopIteration, which becomes a RuntimeError on Python 3.7+.
            return
        if (len(line) == 0): break  # EOF.
        if len(line) == 1: continue  # Blank line.

        aux = line.strip().split()
        if (len(aux) == 3) and (aux[0] == '#'):
            yield aux[1], int(aux[2])
class Probe(Struct):
    """
    Base class for all point probes. Enforces two points minimum.
    """
    # Evaluate cache shared by all probes with share_geometry set.
    cache = Struct(name='probe_shared_evaluate_cache')

    # Whether the probe end point connects back to its start point.
    is_cyclic = False

    def __init__(self, name, share_geometry=True, n_point=None, **kwargs):
        """
        Parameters
        ----------
        name : str
            The probe name, set automatically by the subclasses.
        share_geometry : bool
            Set to True to indicate that all the probes will work on the same
            domain. Certain data are then computed only for the first probe
            and cached.
        n_point : int
           The (fixed) number of probe points, when positive. When
           non-positive, the number of points is adaptively increased
           starting from -n_point, until the neighboring point distance is
           less than the diameter of the elements enclosing the points. When
           None, it is set to -10.

        For additional parameters see the __init__() docstrings of the
        subclasses.
        """
        Struct.__init__(self, name=name, share_geometry=share_geometry,
                        **kwargs)
        self.set_n_point(n_point)

        # Defaults; can be overridden later via set_options().
        self.options = Struct(close_limit=0.1, size_hint=None)

        # Per-instance cache, used only when share_geometry is False.
        self.cache = Struct(name='probe_local_evaluate_cache')

        self.is_refined = False

    def get_evaluate_cache(self):
        """
        Return the evaluate cache for domain-related data given by
        `self.share_geometry`.
        """
        return Probe.cache if self.share_geometry else self.cache

    def set_n_point(self, n_point):
        """
        Set the number of probe points.

        Parameters
        ----------
        n_point : int
           The (fixed) number of probe points, when positive. When
           non-positive, the number of points is adaptively increased
           starting from -n_point, until the neighboring point distance is
           less than the diameter of the elements enclosing the points. When
           None, it is set to -10.
        """
        if n_point is None:
            n_point = -10

        if n_point <= 0:
            # Adaptive mode: -n_point is the initial guess;
            # n_point_required == -1 marks the adaptive mode.
            n_point = max(-n_point, 2)
            self.n_point_required = -1

        else:
            n_point = max(n_point, 2)
            self.n_point_required = n_point

        # n_point0 remembers the initial value for reset_refinement().
        self.n_point0 = self.n_point = n_point

    def set_options(self, close_limit=None, size_hint=None):
        """
        Set the probe options.

        Parameters
        ----------
        close_limit : float
            The maximum limit distance of a point from the closest
            element allowed for extrapolation.
        size_hint : float
            Element size hint for the refinement of probe parametrization.
        """
        if close_limit is not None:
            self.options.close_limit = close_limit
        if size_hint is not None:
            self.options.size_hint = size_hint

    def report(self):
        """Report the probe parameters as a list of lines."""
        out = [self.__class__.__name__]
        if self.n_point_required == -1:
            aux = 'adaptive'
        else:
            aux = 'fixed'
        out.append('number of points: %s (%s)' % (self.n_point, aux))

        return out

    def __call__(self, variable, **kwargs):
        """
        Probe the given variable. The actual implementation is in
        self.probe(), so that it can be overridden in subclasses.

        Parameters
        ----------
        variable : Variable instance
            The variable to be sampled along the probe.
        **kwargs : additional arguments
            See :func:`Probe.probe()`.
        """
        return self.probe(variable, **kwargs)

    def probe(self, variable, mode='val', ret_points=False):
        """
        Probe the given variable.

        Parameters
        ----------
        variable : Variable instance
            The variable to be sampled along the probe.
        mode : {'val', 'grad'}, optional
            The evaluation mode: the variable value (default) or the
            variable value gradient.
        ret_points : bool
            If True, return also the probe points.

        Returns
        -------
        pars : array
           The parametrization of the probe points.
        points : array, optional
           If `ret_points` is True, the coordinates of points corresponding
           to `pars`, where the `variable` is evaluated.
        vals : array
           The probed values.
        """
        refine_flag = None

        ev = variable.evaluate_at
        field = variable.field
        cache = field.get_evaluate_cache(cache=self.get_evaluate_cache(),
                                         share_geometry=self.share_geometry)
        self.reset_refinement()

        # Adaptive refinement loop: evaluate, then refine the probe
        # parametrization until no interval needs refining.
        while True:
            pars, points = self.get_points(refine_flag)
            if not nm.isfinite(points).all():
                raise ValueError('Inf/nan in probe points!')

            vals, cells = ev(points, mode=mode, strategy='general',
                             close_limit=self.options.close_limit,
                             cache=cache, ret_cells=True)

            if self.is_refined:
                break

            else:
                refine_flag = self.refine_points(variable, points, cells)
                if (refine_flag == False).all():
                    break

        self.is_refined = True

        if ret_points:
            return pars, points, vals

        else:
            return pars, vals

    def reset_refinement(self):
        """
        Reset the probe refinement state.
        """
        self.is_refined = False
        self.n_point = self.n_point0

    def refine_points(self, variable, points, cells):
        """
        Mark intervals between points for a refinement, based on element
        sizes at those points. Assumes the points to be ordered.

        Returns
        -------
        refine_flag : bool array
            True at places corresponding to intervals between subsequent
            points that need to be refined.
        """
        if self.n_point_required == self.n_point:
            refine_flag = nm.array([False])

        else:
            if self.options.size_hint is None:
                ed = variable.get_element_diameters(cells, 0)
                pd = 0.5 * (ed[1:] + ed[:-1])
                # Size hint for the closing segment of cyclic probes.
                pd1 = 0.5 * (ed[0] + ed[-1])

            else:
                # Fix: the cyclic branch below used to read `ed`, which is
                # undefined when a size hint is given -> NameError.
                pd = pd1 = self.options.size_hint

            dist = norm_l2_along_axis(points[1:] - points[:-1])
            refine_flag = dist > pd
            if self.is_cyclic:
                dist1 = nla.norm(points[0] - points[-1])
                refine_flag = nm.r_[refine_flag, dist1 > pd1]

        return refine_flag

    @staticmethod
    def refine_pars(pars, refine_flag, cyclic_val=None):
        """
        Refine the probe parametrization based on the refine_flag: insert
        the midpoint of each flagged interval.
        """
        ii = nm.where(refine_flag)[0]
        ip = ii + 1

        if cyclic_val is not None:
            # The closing interval wraps around to cyclic_val.
            cpars = nm.r_[pars, cyclic_val]
            pp = 0.5 * (cpars[ip] + cpars[ii])

        else:
            pp = 0.5 * (pars[ip] + pars[ii])

        pars = nm.insert(pars, ip, pp)

        return pars
class PointsProbe(Probe):
    """
    Probe variables in given points.
    """

    def __init__(self, points, share_geometry=True):
        """
        Parameters
        ----------
        points : array_like
            The coordinates of the points.
        """
        coors = nm.array(points, dtype=nm.float64, order='C')
        if coors.ndim == 1:
            # A single coordinate per point - make it a column.
            coors = coors.reshape((-1, 1))

        num = coors.shape[0]
        Probe.__init__(self, name='points %d' % num,
                       share_geometry=share_geometry,
                       points=coors, n_point=num)

        self.n_point_single = num

    def report(self):
        """Report the probe parameters."""
        out = Probe.report(self)
        out.extend('point %d: %s' % (ii, point)
                   for ii, point in enumerate(self.points))
        out.append('-----')
        return out

    def refine_points(self, variable, points, cache):
        """No refinement for this probe."""
        return nm.array([False])

    def get_points(self, refine_flag=None):
        """
        Get the probe points.

        Returns
        -------
        pars : array_like
           The independent coordinate of the probe.
        points : array_like
           The probe points, parametrized by pars.
        """
        return nm.arange(self.n_point, dtype=nm.float64), self.points
class LineProbe(Probe):
    """
    Probe variables along a line.

    If n_point is positive, that number of evenly spaced points is used. If
    n_point is None or non-positive, an adaptive refinement based on element
    diameters is used and the number of points and their spacing are
    determined automatically. If it is negative, -n_point is used as an
    initial guess.
    """

    def __init__(self, p0, p1, n_point, share_geometry=True):
        """
        Parameters
        ----------
        p0 : array_like
            The coordinates of the start point.
        p1 : array_like
            The coordinates of the end point.
        """
        start = nm.array(p0, dtype=nm.float64)
        end = nm.array(p1, dtype=nm.float64)

        Probe.__init__(self, name='line [%s, %s]' % (start, end),
                       share_geometry=share_geometry,
                       p0=start, p1=end, n_point=n_point)

        # Unit direction vector and the line length.
        dirvec = self.p1 - self.p0
        self.length = nm.linalg.norm(dirvec)
        self.dirvec = dirvec / self.length

    def report(self):
        """Report the probe parameters."""
        out = Probe.report(self)
        out.extend(['point 0: %s' % self.p0,
                    'point 1: %s' % self.p1,
                    '-----'])
        return out

    def get_points(self, refine_flag=None):
        """
        Get the probe points.

        Returns
        -------
        pars : array_like
           The independent coordinate of the probe.
        points : array_like
           The probe points, parametrized by pars.
        """
        if self.is_refined:
            return self.pars, self.points

        if refine_flag is None:
            pars = nm.linspace(0, self.length, self.n_point)

        else:
            # Insert midpoints into the flagged intervals.
            pars = Probe.refine_pars(self.pars, refine_flag)
            self.n_point = pars.shape[0]

        self.pars = pars
        self.points = self.p0 + self.dirvec * pars[:,None]

        return pars, self.points
class RayProbe(Probe):
    """
    Probe variables along a ray. The points are parametrized by a function of
    radial coordinates from a given point in a given direction.
    """

    def __init__(self, p0, dirvec, p_fun, n_point, both_dirs,
                 share_geometry=True):
        """
        Parameters
        ----------
        p0 : array_like
            The coordinates of the start point.
        dirvec : array_like
            The probe direction vector.
        p_fun : function
            The function returning the probe parametrization along the dirvec
            direction.
        both_dirs : bool
            If True, the probe works, starting at p0, symmetrically in both
            dirvec and -dirvec directions.
        """
        origin = nm.array(p0, dtype=nm.float64)
        direction = nm.array(dirvec, dtype=nm.float64)
        # Normalize the direction to a unit vector.
        direction /= nla.norm(direction)

        # Twice the points when probing in both directions.
        n_point_true = 2 * n_point if both_dirs else n_point

        Probe.__init__(self, name='ray %s [%s, %s]' % (p_fun.__name__,
                                                       origin, direction),
                       share_geometry=share_geometry,
                       p0=origin, dirvec=direction, p_fun=p_fun,
                       n_point=n_point_true, both_dirs=both_dirs)

        self.n_point_single = n_point

    def report(self):
        """Report the probe parameters."""
        out = Probe.report(self)
        out.extend(['point 0: %s' % self.p0,
                    'direction vector: %s' % self.dirvec,
                    'both directions: %s' % self.both_dirs,
                    'distribution function: %s' % self.p_fun.__name__,
                    '-----'])
        return out

    def refine_points(self, variable, points, cache):
        """No refinement for this probe."""
        return nm.array([False])

    def gen_points(self, sign):
        """Generate the probe points and their parametrization."""
        pars = self.p_fun(nm.arange(self.n_point_single, dtype=nm.float64))
        return pars, self.p0 + sign * self.dirvec * pars[:,None]

    def get_points(self, refine_flag=None):
        """
        Get the probe points.

        Returns
        -------
        pars : array_like
           The independent coordinate of the probe.
        points : array_like
           The probe points, parametrized by pars.
        """
        pars, points = self.gen_points(1.0)
        if not self.both_dirs:
            return pars, points

        # Prepend the mirrored points with negated parametrization.
        pars0, points0 = self.gen_points(-1.0)
        pars = nm.concatenate((-pars0[::-1], pars))
        points = nm.concatenate((points0[::-1], points))

        return pars, points
class CircleProbe(Probe):
"""
Probe variables along a circle.
If n_point is positive, that number of evenly spaced points is used. If
n_point is None or non-positive, an adaptive refinement based on element
diameters is used and the number of points and their spacing are determined
automatically. If it is negative, -n_point is used as an initial guess.
"""
is_cyclic = True
def __init__(self, centre, normal, radius, n_point, share_geometry=True):
    """
    Parameters
    ----------
    centre : array_like
        The coordinates of the circle centre.
    normal : array_like
        The normal vector perpendicular to the circle plane.
    radius : float
        The radius of the circle.
    """
    centre = nm.array(centre, dtype=nm.float64)
    normal = nm.array(normal, dtype=nm.float64)
    # Work with the unit normal only.
    normal /= nla.norm(normal)

    name = 'circle [%s, %s, %s]' % (centre, normal, radius)

    Probe.__init__(self, name=name, share_geometry=share_geometry,
                   centre=centre, normal=normal,
                   radius=radius, n_point=n_point)
def report(self):
    """Report the probe parameters."""
    lines = Probe.report(self)
    lines.extend(['centre: %s' % self.centre,
                  'normal: %s' % self.normal,
                  'radius: %s' % self.radius,
                  '-----'])
    return lines
def get_points(self, refine_flag=None):
"""
Get the probe points.
Returns
-------
pars : array_like
The independent coordinate of the probe.
points : array_like
The probe points, parametrized by pars.
"""
# Vector of angles.
if self.is_refined:
return self.pars, self.points
if refine_flag is None:
pars = nm.linspace(0.0, 2.0*nm.pi, self.n_point + 1)[:-1]
else:
pars = Probe.refine_pars(self.pars, refine_flag,
cyclic_val=2.0 * nm.pi)
self.n_point = pars.shape[0]
self.pars = pars
# Create the points in xy plane, centered at the origin.
x = self.radius * nm.cos(pars[:,None])
y = self.radius * nm.sin(pars[:,None])
if len(self.centre) == 3:
z = nm.zeros((self.n_point, 1), dtype=nm.float64)
points = nm.c_[x, y, z]
# Rotate to satisfy the normal, shift to the centre.
n1 = nm.array([0.0, 0.0, 1.0], dtype=nm.float64)
axis = nm.cross(n1, self.normal)
angle = nm.arccos(nm.dot(n1, self.normal))
if nla.norm(axis) < 0.1:
# n1 == self.normal
rot_mtx = nm.eye(3, dtype=nm.float64)
else:
rot_mtx = | make_axis_rotation_matrix(axis, angle) | sfepy.linalg.make_axis_rotation_matrix |
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
"""Define the problem to solve."""
from sfepy.discrete.fem.meshio import UserMeshIO
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.mechanics.matcoefs import stiffness_from_lame
def mesh_hook(mesh, mode):
"""
Generate the block mesh.
"""
if mode == 'read':
mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
verbose=False)
return mesh
elif mode == 'write':
pass
filename_mesh = | UserMeshIO(mesh_hook) | sfepy.discrete.fem.meshio.UserMeshIO |
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve.

    NOTE: the configuration is returned via ``locals()``, so every local
    name defined below becomes a problem description item - do not rename
    the configuration variables.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame

    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            # 2 x 2 x 4 vertices over a 2 x 2 x 3 block centered at z = 1.5.
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5],
                                  name='el3', verbose=False)
            return mesh

        elif mode == 'write':
            pass

    filename_mesh = UserMeshIO(mesh_hook)

    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }

    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        # The actual 'load' function is set later by solve_branch().
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }

    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }

    # Coefficients are chosen so that the tangent stiffness is the same for
    # all material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }

    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }

    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }

    # Fix the bottom; allow only vertical motion of the top.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }

    integrals = {
        'i' : 1,
        'isurf' : 2,
    }

    # One equation set per compared material model.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }

    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }

    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load increasing linearly with the time step."""
    if mode == 'qp':
        magnitude = 0.1 * ts.step
        return {'val' : nm.full((coor.shape[0], 1, 1), magnitude)}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load increasing linearly with the time step."""
    if mode == 'qp':
        magnitude = -0.1 * ts.step
        return {'val' : nm.full((coor.shape[0], 1, 1), magnitude)}
def store_top_u(displacements):
    """Function _store() will be called at the end of each loading step. Top
    displacements will be stored into `displacements`."""
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_top = problem.get_variables()['u'].get_state_in_region(region)
        # Record the mean vertical (last component) displacement.
        displacements.append(nm.mean(u_top[:,-1]))

    return _store
def solve_branch(problem, branch_function):
    """
    Solve all equation sets defined in the problem configuration with the
    given load function.

    Parameters
    ----------
    problem : Problem instance
        The problem with the equation sets in its configuration.
    branch_function : function
        The load material function (tension or compression).

    Returns
    -------
    displacements : dict
        The top displacement histories, keyed by equation set names.
    """
    displacements = {}
    # items() works on both Python 2 and 3 - dict.iteritems() does not
    # exist on Python 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})

        load = problem.get_materials()['load']
        load.set_function(branch_function)

        time_solver = problem.get_time_solver()
        time_solver.init_time()

        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass

        displacements[key] = nm.array(out, dtype=nm.float64)

    return displacements
# Command-line interface strings: option parser usage text and help
# messages.
usage = '%prog [options]\n' + __doc__.rstrip()

helps = {
    'no_plot' : 'do not show plot window',
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.base.plotutils import plt
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-n', '--no-plot',
action="store_true", dest='no_plot',
default=False, help=helps['no_plot'])
options, args = parser.parse_args()
required, other = | get_standard_keywords() | sfepy.base.conf.get_standard_keywords |
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve.

    NOTE: the configuration is returned via ``locals()``, so every local
    name defined below becomes a problem description item - do not rename
    the configuration variables.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame

    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            # 2 x 2 x 4 vertices over a 2 x 2 x 3 block centered at z = 1.5.
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5],
                                  name='el3', verbose=False)
            return mesh

        elif mode == 'write':
            pass

    filename_mesh = UserMeshIO(mesh_hook)

    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }

    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        # The actual 'load' function is set later by solve_branch().
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }

    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }

    # Coefficients are chosen so that the tangent stiffness is the same for
    # all material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }

    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }

    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }

    # Fix the bottom; allow only vertical motion of the top.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }

    integrals = {
        'i' : 1,
        'isurf' : 2,
    }

    # One equation set per compared material model.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }

    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }

    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load increasing linearly with the time step."""
    if mode == 'qp':
        magnitude = 0.1 * ts.step
        return {'val' : nm.full((coor.shape[0], 1, 1), magnitude)}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load increasing linearly with the time step."""
    if mode == 'qp':
        magnitude = -0.1 * ts.step
        return {'val' : nm.full((coor.shape[0], 1, 1), magnitude)}
def store_top_u(displacements):
    """Function _store() will be called at the end of each loading step. Top
    displacements will be stored into `displacements`."""
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_top = problem.get_variables()['u'].get_state_in_region(region)
        # Record the mean vertical (last component) displacement.
        displacements.append(nm.mean(u_top[:,-1]))

    return _store
def solve_branch(problem, branch_function):
    """
    Solve all equation sets defined in the problem configuration with the
    given load function.

    Parameters
    ----------
    problem : Problem instance
        The problem with the equation sets in its configuration.
    branch_function : function
        The load material function (tension or compression).

    Returns
    -------
    displacements : dict
        The top displacement histories, keyed by equation set names.
    """
    displacements = {}
    # items() works on both Python 2 and 3 - dict.iteritems() does not
    # exist on Python 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})

        load = problem.get_materials()['load']
        load.set_function(branch_function)

        time_solver = problem.get_time_solver()
        time_solver.init_time()

        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass

        displacements[key] = nm.array(out, dtype=nm.float64)

    return displacements
# Command-line interface strings: option parser usage text and help
# messages.
usage = '%prog [options]\n' + __doc__.rstrip()

helps = {
    'no_plot' : 'do not show plot window',
}
def main():
from sfepy.base.base import output
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.base.plotutils import plt
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-n', '--no-plot',
action="store_true", dest='no_plot',
default=False, help=helps['no_plot'])
options, args = parser.parse_args()
required, other = get_standard_keywords()
# Use this file as the input file.
conf = | ProblemConf.from_file(__file__, required, other) | sfepy.base.conf.ProblemConf.from_file |
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve.

    NOTE: the configuration is returned via ``locals()``, so every local
    name defined below becomes a problem description item - do not rename
    the configuration variables.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame

    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            # 2 x 2 x 4 vertices over a 2 x 2 x 3 block centered at z = 1.5.
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5],
                                  name='el3', verbose=False)
            return mesh

        elif mode == 'write':
            pass

    filename_mesh = UserMeshIO(mesh_hook)

    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }

    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        # The actual 'load' function is set later by solve_branch().
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }

    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }

    # Coefficients are chosen so that the tangent stiffness is the same for
    # all material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }

    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }

    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }

    # Fix the bottom; allow only vertical motion of the top.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }

    integrals = {
        'i' : 1,
        'isurf' : 2,
    }

    # One equation set per compared material model.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }

    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }

    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load increasing linearly with the time step."""
    if mode == 'qp':
        magnitude = 0.1 * ts.step
        return {'val' : nm.full((coor.shape[0], 1, 1), magnitude)}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load increasing linearly with the time step."""
    if mode == 'qp':
        magnitude = -0.1 * ts.step
        return {'val' : nm.full((coor.shape[0], 1, 1), magnitude)}
def store_top_u(displacements):
    """Function _store() will be called at the end of each loading step. Top
    displacements will be stored into `displacements`."""
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_top = problem.get_variables()['u'].get_state_in_region(region)
        # Record the mean vertical (last component) displacement.
        displacements.append(nm.mean(u_top[:,-1]))

    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """Define the problem to solve.

    Returns all local names as the problem configuration (declarative
    sfepy style), so the local variable names below are part of the
    interface and must not be renamed.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    filename_mesh = UserMeshIO(mesh_hook)
    # Solver and time-stepping setup.
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom; on the top constrain the x, y components only, so the
    # top can move axially (z) under the traction load.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material law; solve_branch() activates one at a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load: traction grows by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = 0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load: traction decreases by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = -0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def store_top_u(displacements):
    """Return a step hook recording the mean top-surface displacement.

    The returned callable is meant to be passed as ``step_hook`` to a
    time solver; after each step it appends the mean of the last
    displacement component on the 'Top' region to `displacements`.
    """
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_var = problem.get_variables()['u']
        top_u = u_var.get_state_in_region(region)
        displacements.append(nm.mean(top_u[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """Define the problem to solve.

    Returns all local names as the problem configuration (declarative
    sfepy style), so the local variable names below are part of the
    interface and must not be renamed.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    filename_mesh = UserMeshIO(mesh_hook)
    # Solver and time-stepping setup.
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom; on the top constrain the x, y components only, so the
    # top can move axially (z) under the traction load.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material law; solve_branch() activates one at a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load: traction grows by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = 0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load: traction decreases by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = -0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def store_top_u(displacements):
    """Return a step hook recording the mean top-surface displacement.

    The returned callable is meant to be passed as ``step_hook`` to a
    time solver; after each step it appends the mean of the last
    displacement component on the 'Top' region to `displacements`.
    """
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_var = problem.get_variables()['u']
        top_u = u_var.get_state_in_region(region)
        displacements.append(nm.mean(top_u[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """Define the problem to solve.

    Returns all local names as the problem configuration (declarative
    sfepy style), so the local variable names below are part of the
    interface and must not be renamed.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    filename_mesh = UserMeshIO(mesh_hook)
    # Solver and time-stepping setup.
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom; on the top constrain the x, y components only, so the
    # top can move axially (z) under the traction load.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material law; solve_branch() activates one at a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load: traction grows by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = 0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load: traction decreases by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = -0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def store_top_u(displacements):
    """Return a step hook recording the mean top-surface displacement.

    The returned callable is meant to be passed as ``step_hook`` to a
    time solver; after each step it appends the mean of the last
    displacement component on the 'Top' region to `displacements`.
    """
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_var = problem.get_variables()['u']
        top_u = u_var.get_state_in_region(region)
        displacements.append(nm.mean(top_u[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """Define the problem to solve.

    Returns all local names as the problem configuration (declarative
    sfepy style), so the local variable names below are part of the
    interface and must not be renamed.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    filename_mesh = UserMeshIO(mesh_hook)
    # Solver and time-stepping setup.
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom; on the top constrain the x, y components only, so the
    # top can move axially (z) under the traction load.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material law; solve_branch() activates one at a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load: traction grows by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = 0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load: traction decreases by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = -0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def store_top_u(displacements):
    """Return a step hook recording the mean top-surface displacement.

    The returned callable is meant to be passed as ``step_hook`` to a
    time solver; after each step it appends the mean of the last
    displacement component on the 'Top' region to `displacements`.
    """
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_var = problem.get_variables()['u']
        top_u = u_var.get_state_in_region(region)
        displacements.append(nm.mean(top_u[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        # NOTE(review): dict.iteritems() is Python 2 only; fails on Python 3.
        for key, val in displacements.iteritems():
            plt.plot(load, val)
            legend.append(key)
        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """Define the problem to solve.

    Returns all local names as the problem configuration (declarative
    sfepy style), so the local variable names below are part of the
    interface and must not be renamed.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    filename_mesh = UserMeshIO(mesh_hook)
    # Solver and time-stepping setup.
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom; on the top constrain the x, y components only, so the
    # top can move axially (z) under the traction load.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material law; solve_branch() activates one at a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load: traction grows by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = 0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load: traction decreases by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = -0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def store_top_u(displacements):
    """Return a step hook recording the mean top-surface displacement.

    The returned callable is meant to be passed as ``step_hook`` to a
    time solver; after each step it appends the mean of the last
    displacement component on the 'Top' region to `displacements`.
    """
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_var = problem.get_variables()['u']
        top_u = u_var.get_state_in_region(region)
        displacements.append(nm.mean(top_u[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        # NOTE(review): dict.iteritems() is Python 2 only; fails on Python 3.
        for key, val in displacements.iteritems():
            plt.plot(load, val)
            legend.append(key)
        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """Define the problem to solve.

    Returns all local names as the problem configuration (declarative
    sfepy style), so the local variable names below are part of the
    interface and must not be renamed.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    filename_mesh = UserMeshIO(mesh_hook)
    # Solver and time-stepping setup.
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom; on the top constrain the x, y components only, so the
    # top can move axially (z) under the traction load.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material law; solve_branch() activates one at a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tension load: traction grows by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = 0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compression load: traction decreases by 0.1 per time step.

    Returns ``{'val' : ...}`` with one value per quadrature point when
    `mode` is 'qp', otherwise None.
    """
    if mode != 'qp':
        return None
    traction = -0.1 * ts.step
    return {'val' : nm.tile(traction, (coor.shape[0], 1, 1))}
def store_top_u(displacements):
    """Return a step hook recording the mean top-surface displacement.

    The returned callable is meant to be passed as ``step_hook`` to a
    time solver; after each step it appends the mean of the last
    displacement component on the 'Top' region to `displacements`.
    """
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_var = problem.get_variables()['u']
        top_u = u_var.get_state_in_region(region)
        displacements.append(nm.mean(top_u[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """Solve the problem once per equation set (material law).

    Parameters
    ----------
    problem : Problem
        The problem instance; its configuration must define `equations`.
    branch_function : callable
        The load function defining the branch (tension or compression).

    Returns
    -------
    displacements : dict
        Mean top displacements per time step, keyed by equation set name.
    """
    displacements = {}
    # .items() instead of Python 2-only .iteritems(); works on both 2 and 3.
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})
        load = problem.get_materials()['load']
        load.set_function(branch_function)
        time_solver = problem.get_time_solver()
        time_solver.init_time()
        # Iterate the time steps; results are collected by the step hook.
        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass
        displacements[key] = nm.array(out, dtype=nm.float64)
    return displacements
# Usage/help strings for the command line parser in main(); the usage
# message reuses the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """Run the uniaxial tension/compression comparison.

    NOTE(review): this copy of main() appears truncated (the parsed
    `options.no_plot` flag is never used) -- likely file corruption.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()
    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)
    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    # Join the branches: compression (reversed) first, then tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]
    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        # NOTE(review): dict.iteritems() is Python 2 only; fails on Python 3.
        for key, val in displacements.iteritems():
            plt.plot(load, val)
            legend.append(key)
        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
        plt.grid(True)
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve: mesh, materials, variables, regions,
    boundary conditions, equations (one set per material model) and
    solvers.  The local namespace is returned as the sfepy problem
    description.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    # The mesh is generated on the fly by mesh_hook().
    filename_mesh = UserMeshIO(mesh_hook)
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    # Load functions; 'load' material starts 'empty' and is replaced in
    # solve_branch().
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom face; the top face keeps zero x, y displacement.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material model; solve_branch() activates one at
    # a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tensile pressure load: 0.1 kPa per time step, uniform over `coor`."""
    if mode != 'qp':
        return
    magnitude = 0.1 * ts.step
    return {'val' : nm.full((coor.shape[0], 1, 1), magnitude,
                            dtype=nm.float64)}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compressive pressure load: -0.1 kPa per time step, uniform over `coor`."""
    if mode != 'qp':
        return
    magnitude = -0.1 * ts.step
    return {'val' : nm.full((coor.shape[0], 1, 1), magnitude,
                            dtype=nm.float64)}
def store_top_u(displacements):
    """Function _store() will be called at the end of each loading step. Top
    displacements will be stored into `displacements`."""
    def _store(problem, ts, state):
        # Mean z-displacement (last component) of the 'Top' region vertices.
        top = problem.domain.regions['Top']
        top_u = problem.get_variables()['u'].get_state_in_region(top)
        displacements.append(nm.mean(top_u[:,-1]))
    return _store
def solve_branch(problem, branch_function):
    """
    Solve the problem for each equation set (material model) with the given
    load function.

    Returns a dict mapping the equation-set name to an array with the mean
    top-face displacement per time step.
    """
    displacements = {}
    # items() instead of Python 2 only dict.iteritems().
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})

        # Replace the 'empty' placeholder load with the branch function.
        load = problem.get_materials()['load']
        load.set_function(branch_function)

        time_solver = problem.get_time_solver()
        time_solver.init_time()

        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass

        displacements[key] = nm.array(out, dtype=nm.float64)

    return displacements
# Command-line usage text, built from the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
# Help strings for the command-line options.
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """
    Solve the tension and compression branches for each material model and
    plot (or print) the load-displacement curves.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()

    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)

    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)

    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)

    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()

    # Join the branches: compression (reversed) followed by tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]

    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        # items() instead of Python 2 only dict.iteritems().
        for key, val in displacements.items():
            plt.plot(load, val)
            legend.append(key)

        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
        plt.grid(True)

        plt.gcf().savefig('pressure_displacement.png')

        if not options.no_plot:
            plt.show()
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve: mesh, materials, variables, regions,
    boundary conditions, equations (one set per material model) and
    solvers.  The local namespace is returned as the sfepy problem
    description.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    # The mesh is generated on the fly by mesh_hook().
    filename_mesh = UserMeshIO(mesh_hook)
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    # Load functions; 'load' material starts 'empty' and is replaced in
    # solve_branch().
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom face; the top face keeps zero x, y displacement.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material model; solve_branch() activates one at
    # a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tensile pressure load: 0.1 kPa per time step, uniform over `coor`."""
    if mode != 'qp':
        return
    magnitude = 0.1 * ts.step
    return {'val' : nm.full((coor.shape[0], 1, 1), magnitude,
                            dtype=nm.float64)}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compressive pressure load: -0.1 kPa per time step, uniform over `coor`."""
    if mode != 'qp':
        return
    magnitude = -0.1 * ts.step
    return {'val' : nm.full((coor.shape[0], 1, 1), magnitude,
                            dtype=nm.float64)}
def store_top_u(displacements):
    """Create a step hook that appends the mean last-component (z)
    displacement of the 'Top' region to `displacements` after each step."""
    def _store(problem, ts, state):
        region = problem.domain.regions['Top']
        u_top = problem.get_variables()['u'].get_state_in_region(region)
        displacements.append(nm.mean(u_top[:, -1]))
    return _store
def solve_branch(problem, branch_function):
    """
    Solve the problem for each equation set (material model) with the given
    load function.

    Returns a dict mapping the equation-set name to an array with the mean
    top-face displacement per time step.
    """
    displacements = {}
    # items() instead of Python 2 only dict.iteritems().
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})

        # Replace the 'empty' placeholder load with the branch function.
        load = problem.get_materials()['load']
        load.set_function(branch_function)

        time_solver = problem.get_time_solver()
        time_solver.init_time()

        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass

        displacements[key] = nm.array(out, dtype=nm.float64)

    return displacements
# Command-line usage text, built from the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
# Help strings for the command-line options.
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """
    Solve the tension and compression branches for each material model and
    plot (or print) the load-displacement curves.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()

    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)

    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)

    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)

    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()

    # Join the branches: compression (reversed) followed by tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]

    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        # items() instead of Python 2 only dict.iteritems().
        for key, val in displacements.items():
            plt.plot(load, val)
            legend.append(key)

        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
        plt.grid(True)

        plt.gcf().savefig('pressure_displacement.png')

        if not options.no_plot:
            plt.show()
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve: mesh, materials, variables, regions,
    boundary conditions, equations (one set per material model) and
    solvers.  The local namespace is returned as the sfepy problem
    description.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    # The mesh is generated on the fly by mesh_hook().
    filename_mesh = UserMeshIO(mesh_hook)
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    # Load functions; 'load' material starts 'empty' and is replaced in
    # solve_branch().
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom face; the top face keeps zero x, y displacement.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material model; solve_branch() activates one at
    # a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
#!/usr/bin/env python
"""
Compare various elastic materials w.r.t. uniaxial tension/compression test.
Requires Matplotlib.
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import numpy as nm
def define():
    """
    Define the problem to solve: mesh, materials, variables, regions,
    boundary conditions, equations (one set per material model) and
    solvers.  The local namespace is returned as the sfepy problem
    description.
    """
    from sfepy.discrete.fem.meshio import UserMeshIO
    from sfepy.mesh.mesh_generators import gen_block_mesh
    from sfepy.mechanics.matcoefs import stiffness_from_lame
    def mesh_hook(mesh, mode):
        """
        Generate the block mesh.
        """
        if mode == 'read':
            mesh = gen_block_mesh([2, 2, 3], [2, 2, 4], [0, 0, 1.5], name='el3',
                                  verbose=False)
            return mesh
        elif mode == 'write':
            pass
    # The mesh is generated on the fly by mesh_hook().
    filename_mesh = UserMeshIO(mesh_hook)
    options = {
        'nls' : 'newton',
        'ls' : 'ls',
        'ts' : 'ts',
        'save_steps' : -1,
    }
    # Load functions; 'load' material starts 'empty' and is replaced in
    # solve_branch().
    functions = {
        'linear_tension' : (linear_tension,),
        'linear_compression' : (linear_compression,),
        'empty' : (lambda ts, coor, mode, region, ig: None,),
    }
    fields = {
        'displacement' : ('real', 3, 'Omega', 1),
    }
    # Coefficients are chosen so that the tangent stiffness is the same for all
    # material for zero strains.
    # Young modulus = 10 kPa, Poisson's ratio = 0.3
    materials = {
        'solid' : ({
            'K' : 8.333, # bulk modulus
            'mu_nh' : 3.846, # shear modulus of neoHookean term
            'mu_mr' : 1.923, # shear modulus of Mooney-Rivlin term
            'kappa' : 1.923, # second modulus of Mooney-Rivlin term
            # elasticity for LE term
            'D' : stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
        },),
        'load' : 'empty',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Bottom' : ('vertices in (z < 0.1)', 'facet'),
        'Top' : ('vertices in (z > 2.9)', 'facet'),
    }
    # Fix the bottom face; the top face keeps zero x, y displacement.
    ebcs = {
        'fixb' : ('Bottom', {'u.all' : 0.0}),
        'fixt' : ('Top', {'u.[0,1]' : 0.0}),
    }
    integrals = {
        'i' : 1,
        'isurf' : 2,
    }
    # One equation set per material model; solve_branch() activates one at
    # a time.
    equations = {
        'linear' : """dw_lin_elastic.i.Omega(solid.D, v, u)
                    = dw_surface_ltr.isurf.Top(load.val, v)""",
        'neo-Hookean' : """dw_tl_he_neohook.i.Omega(solid.mu_nh, v, u)
                         + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                         = dw_surface_ltr.isurf.Top(load.val, v)""",
        'Mooney-Rivlin' : """dw_tl_he_neohook.i.Omega(solid.mu_mr, v, u)
                           + dw_tl_he_mooney_rivlin.i.Omega(solid.kappa, v, u)
                           + dw_tl_bulk_penalty.i.Omega(solid.K, v, u)
                           = dw_surface_ltr.isurf.Top(load.val, v)""",
    }
    solvers = {
        'ls' : ('ls.scipy_direct', {}),
        'newton' : ('nls.newton', {
            'i_max' : 5,
            'eps_a' : 1e-10,
            'eps_r' : 1.0,
        }),
        'ts' : ('ts.simple', {
            't0' : 0,
            't1' : 1,
            'dt' : None,
            'n_step' : 101, # has precedence over dt!
        }),
    }
    return locals()
##
# Pressure tractions.
def linear_tension(ts, coor, mode=None, **kwargs):
    """Tensile pressure load: 0.1 kPa per time step, uniform over `coor`."""
    if mode != 'qp':
        return
    magnitude = 0.1 * ts.step
    return {'val' : nm.full((coor.shape[0], 1, 1), magnitude,
                            dtype=nm.float64)}
def linear_compression(ts, coor, mode=None, **kwargs):
    """Compressive pressure load: -0.1 kPa per time step, uniform over `coor`."""
    if mode != 'qp':
        return
    magnitude = -0.1 * ts.step
    return {'val' : nm.full((coor.shape[0], 1, 1), magnitude,
                            dtype=nm.float64)}
def store_top_u(displacements):
    """Function _store() will be called at the end of each loading step. Top
    displacements will be stored into `displacements`."""
    def _store(problem, ts, state):
        # Mean z-displacement (last component) of the 'Top' region vertices.
        top = problem.domain.regions['Top']
        top_u = problem.get_variables()['u'].get_state_in_region(top)
        displacements.append(nm.mean(top_u[:,-1]))
    return _store
def solve_branch(problem, branch_function):
    """
    Solve the problem for each equation set (material model) with the given
    load function.

    Returns a dict mapping the equation-set name to an array with the mean
    top-face displacement per time step.
    """
    displacements = {}
    # items() instead of Python 2 only dict.iteritems().
    for key, eq in problem.conf.equations.items():
        problem.set_equations({key : eq})

        # Replace the 'empty' placeholder load with the branch function.
        load = problem.get_materials()['load']
        load.set_function(branch_function)

        time_solver = problem.get_time_solver()
        time_solver.init_time()

        out = []
        for _ in time_solver(save_results=False, step_hook=store_top_u(out)):
            pass

        displacements[key] = nm.array(out, dtype=nm.float64)

    return displacements
# Command-line usage text, built from the module docstring.
usage = '%prog [options]\n' + __doc__.rstrip()
# Help strings for the command-line options.
helps = {
    'no_plot' : 'do not show plot window',
}
def main():
    """
    Solve the tension and compression branches for each material model and
    plot (or print) the load-displacement curves.
    """
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()

    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)

    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)

    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)

    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()

    # Join the branches: compression (reversed) followed by tension.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]

    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        # items() instead of Python 2 only dict.iteritems().
        for key, val in displacements.items():
            plt.plot(load, val)
            legend.append(key)

        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
        plt.grid(True)

        plt.gcf().savefig('pressure_displacement.png')

        if not options.no_plot:
            plt.show()