prompt
stringlengths 174
59.5k
| completion
stringlengths 7
228
| api
stringlengths 12
64
|
---|---|---|
r"""
Thermo-elasticity with a computed temperature demonstrating equation sequence
solver.
Uses `dw_biot` term with an isotropic coefficient for thermo-elastic coupling.
The equation sequence solver (``'ess'`` in ``solvers``) automatically solves
first the temperature distribution and then the elasticity problem with the
already computed temperature.
Find :math:`\ul{u}`, :math:`T` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{\Omega} (T - T_0)\ \alpha_{ij} e_{ij}(\ul{v})
= 0
\;, \quad \forall \ul{v} \;,
\int_{\Omega} \nabla s \cdot \nabla T
= 0
\;, \quad \forall s \;.
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;, \\
\alpha_{ij} = (3 \lambda + 2 \mu) \alpha \delta_{ij} \;,
:math:`T_0` is the background temperature and :math:`\alpha` is the thermal
expansion coefficient.
Notes
-----
The gallery image was produced by (plus proper view settings)::
./postproc.py block.vtk -d'u,plot_displacements,rel_scaling=1000,color_kind="scalars",color_name="T"' --wireframe --only-names=u -b
"""
import numpy as np
from sfepy.mechanics.matcoefs import stiffness_from_lame
from sfepy import data_dir
# Material parameters.
lam = 10.0
mu = 5.0
thermal_expandability = 1.25e-5
T0 = 20.0 # Background temperature.
filename_mesh = data_dir + '/meshes/3d/block.mesh'
options = {
'ts' : 'ess',
'nls' : 'newton',
'ls' : 'ls',
}
regions = {
'Omega' : 'all',
'Left' : ('vertices in (x < -4.99)', 'facet'),
'Right' : ('vertices in (x > 4.99)', 'facet'),
'Bottom' : ('vertices in (z < -0.99)', 'facet'),
}
fields = {
'displacement': ('real', 3, 'Omega', 1),
'temperature': ('real', 1, 'Omega', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'T' : ('unknown field', 'temperature', 1),
's' : ('test field', 'temperature', 'T'),
}
ebcs = {
'u0' : ('Left', {'u.all' : 0.0}),
't0' : ('Left', {'T.0' : 20.0}),
't2' : ('Bottom', {'T.0' : 0.0}),
't1' : ('Right', {'T.0' : 30.0}),
}
eye_sym = np.array([[1], [1], [1], [0], [0], [0]], dtype=np.float64)
materials = {
'solid' : ({
'D' : | stiffness_from_lame(3, lam=lam, mu=mu) | sfepy.mechanics.matcoefs.stiffness_from_lame |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
| PDESolverApp.setup_options(self) | sfepy.applications.PDESolverApp.setup_options |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = | get_default(app_options, self.conf.options) | sfepy.base.base.get_default |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = get_default(app_options, self.conf.options)
po = HomogenizationEngine.process_options
self.app_options += po(app_options)
def compute_requirements(self, requirements, dependencies, store):
problem = self.problem
opts = self.app_options
req_info = getattr(self.conf, opts.requirements)
requires = insert_sub_reqs(copy(requirements), [], req_info)
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
output('computing dependency %s...' % req)
rargs = req_info[req]
mini_app = MiniAppBase.any_from_conf(req, problem, rargs)
mini_app.setup_output(save_format=opts.save_format,
dump_format=opts.dump_format,
post_process_hook=self.post_process_hook,
file_per_var=opts.file_per_var)
store(mini_app)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
dep_requires = rargs.get('requires', [])
data = {}
for key in dep_requires:
data[key] = dependencies[key]
dep = mini_app(data=data)
dependencies[req] = dep
output('...done')
return dependencies
def call(self, ret_all=False):
problem = self.problem
opts = self.app_options
coef_info = getattr(self.conf, opts.coefs)
compute_names = set(get_default(opts.compute_only, coef_info.keys()))
compute_names = ['c.' + key for key in compute_names]
is_store_filenames = coef_info.pop('filenames', None) is not None
try:
compute_names.remove('c.filenames')
except:
pass
dependencies = {}
save_names = {}
dump_names = {}
def store_filenames(app):
if not '(not_set)' in app.get_save_name_base():
save_names[app.name] = app.get_save_name_base()
if not '(not_set)' in app.get_dump_name_base():
dump_names[app.name] = app.get_dump_name_base()
# Some coefficients can require other coefficients - resolve their
# order here.
req_info = self.conf.get(opts.requirements, {})
info = copy(coef_info)
info.update(req_info)
all_deps = set(compute_names)
sorted_names = []
for coef_name in compute_names:
cargs = coef_info[coef_name[2:]]
requires = cargs.get('requires', [])
deps = insert_sub_reqs(copy(requires), [], info)
all_deps.update(deps)
aux = [key for key in deps if key.startswith('c.')] + [coef_name]
sorted_names.extend(aux)
sorted_coef_names = []
for name in sorted_names:
if name[2:] not in sorted_coef_names:
sorted_coef_names.append(name[2:])
coefs = | Struct() | sfepy.base.base.Struct |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = get_default(app_options, self.conf.options)
po = HomogenizationEngine.process_options
self.app_options += po(app_options)
def compute_requirements(self, requirements, dependencies, store):
problem = self.problem
opts = self.app_options
req_info = getattr(self.conf, opts.requirements)
requires = insert_sub_reqs(copy(requirements), [], req_info)
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
| output('computing dependency %s...' % req) | sfepy.base.base.output |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = get_default(app_options, self.conf.options)
po = HomogenizationEngine.process_options
self.app_options += po(app_options)
def compute_requirements(self, requirements, dependencies, store):
problem = self.problem
opts = self.app_options
req_info = getattr(self.conf, opts.requirements)
requires = insert_sub_reqs(copy(requirements), [], req_info)
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
output('computing dependency %s...' % req)
rargs = req_info[req]
mini_app = MiniAppBase.any_from_conf(req, problem, rargs)
mini_app.setup_output(save_format=opts.save_format,
dump_format=opts.dump_format,
post_process_hook=self.post_process_hook,
file_per_var=opts.file_per_var)
store(mini_app)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
dep_requires = rargs.get('requires', [])
data = {}
for key in dep_requires:
data[key] = dependencies[key]
dep = mini_app(data=data)
dependencies[req] = dep
| output('...done') | sfepy.base.base.output |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = get_default(app_options, self.conf.options)
po = HomogenizationEngine.process_options
self.app_options += po(app_options)
def compute_requirements(self, requirements, dependencies, store):
problem = self.problem
opts = self.app_options
req_info = getattr(self.conf, opts.requirements)
requires = insert_sub_reqs(copy(requirements), [], req_info)
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
output('computing dependency %s...' % req)
rargs = req_info[req]
mini_app = MiniAppBase.any_from_conf(req, problem, rargs)
mini_app.setup_output(save_format=opts.save_format,
dump_format=opts.dump_format,
post_process_hook=self.post_process_hook,
file_per_var=opts.file_per_var)
store(mini_app)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
dep_requires = rargs.get('requires', [])
data = {}
for key in dep_requires:
data[key] = dependencies[key]
dep = mini_app(data=data)
dependencies[req] = dep
output('...done')
return dependencies
def call(self, ret_all=False):
problem = self.problem
opts = self.app_options
coef_info = getattr(self.conf, opts.coefs)
compute_names = set(get_default(opts.compute_only, coef_info.keys()))
compute_names = ['c.' + key for key in compute_names]
is_store_filenames = coef_info.pop('filenames', None) is not None
try:
compute_names.remove('c.filenames')
except:
pass
dependencies = {}
save_names = {}
dump_names = {}
def store_filenames(app):
if not '(not_set)' in app.get_save_name_base():
save_names[app.name] = app.get_save_name_base()
if not '(not_set)' in app.get_dump_name_base():
dump_names[app.name] = app.get_dump_name_base()
# Some coefficients can require other coefficients - resolve their
# order here.
req_info = self.conf.get(opts.requirements, {})
info = copy(coef_info)
info.update(req_info)
all_deps = set(compute_names)
sorted_names = []
for coef_name in compute_names:
cargs = coef_info[coef_name[2:]]
requires = cargs.get('requires', [])
deps = insert_sub_reqs(copy(requires), [], info)
all_deps.update(deps)
aux = [key for key in deps if key.startswith('c.')] + [coef_name]
sorted_names.extend(aux)
sorted_coef_names = []
for name in sorted_names:
if name[2:] not in sorted_coef_names:
sorted_coef_names.append(name[2:])
coefs = Struct()
for coef_name in sorted_coef_names:
cargs = coef_info[coef_name]
| output('computing %s...' % coef_name) | sfepy.base.base.output |
from copy import copy
from sfepy.base.base import output, get_default, Struct
from sfepy.applications import PDESolverApp, Application
from coefs_base import MiniAppBase
def insert_sub_reqs(reqs, levels, req_info):
"""Recursively build all requirements in correct order."""
all_reqs = []
for _, req in enumerate(reqs):
# Coefficients are referenced as 'c.<name>'...
areq = req
if req.startswith('c.'):
areq = req[2:]
try:
rargs = req_info[areq]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get('requires', [])
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append(req)
all_reqs.extend(insert_sub_reqs(sub_reqs, levels, req_info))
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append(req)
return all_reqs
class HomogenizationEngine(PDESolverApp):
@staticmethod
def process_options(options):
get = options.get
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
compute_only=get('compute_only', None),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'),
coefs_info=get('coefs_info', None))
def __init__(self, problem, options, app_options=None,
volume=None, output_prefix='he:', **kwargs):
"""Bypasses PDESolverApp.__init__()!"""
Application.__init__(self, problem.conf, options, output_prefix,
**kwargs)
self.problem = problem
self.setup_options(app_options=app_options)
self.setup_output_info(self.problem, self.options)
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options(self, app_options=None):
PDESolverApp.setup_options(self)
app_options = get_default(app_options, self.conf.options)
po = HomogenizationEngine.process_options
self.app_options += po(app_options)
def compute_requirements(self, requirements, dependencies, store):
problem = self.problem
opts = self.app_options
req_info = getattr(self.conf, opts.requirements)
requires = insert_sub_reqs(copy(requirements), [], req_info)
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
output('computing dependency %s...' % req)
rargs = req_info[req]
mini_app = MiniAppBase.any_from_conf(req, problem, rargs)
mini_app.setup_output(save_format=opts.save_format,
dump_format=opts.dump_format,
post_process_hook=self.post_process_hook,
file_per_var=opts.file_per_var)
store(mini_app)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
dep_requires = rargs.get('requires', [])
data = {}
for key in dep_requires:
data[key] = dependencies[key]
dep = mini_app(data=data)
dependencies[req] = dep
output('...done')
return dependencies
def call(self, ret_all=False):
problem = self.problem
opts = self.app_options
coef_info = getattr(self.conf, opts.coefs)
compute_names = set(get_default(opts.compute_only, coef_info.keys()))
compute_names = ['c.' + key for key in compute_names]
is_store_filenames = coef_info.pop('filenames', None) is not None
try:
compute_names.remove('c.filenames')
except:
pass
dependencies = {}
save_names = {}
dump_names = {}
def store_filenames(app):
if not '(not_set)' in app.get_save_name_base():
save_names[app.name] = app.get_save_name_base()
if not '(not_set)' in app.get_dump_name_base():
dump_names[app.name] = app.get_dump_name_base()
# Some coefficients can require other coefficients - resolve their
# order here.
req_info = self.conf.get(opts.requirements, {})
info = copy(coef_info)
info.update(req_info)
all_deps = set(compute_names)
sorted_names = []
for coef_name in compute_names:
cargs = coef_info[coef_name[2:]]
requires = cargs.get('requires', [])
deps = insert_sub_reqs(copy(requires), [], info)
all_deps.update(deps)
aux = [key for key in deps if key.startswith('c.')] + [coef_name]
sorted_names.extend(aux)
sorted_coef_names = []
for name in sorted_names:
if name[2:] not in sorted_coef_names:
sorted_coef_names.append(name[2:])
coefs = Struct()
for coef_name in sorted_coef_names:
cargs = coef_info[coef_name]
output('computing %s...' % coef_name)
requires = cargs.get('requires', [])
requirements = [name for name in requires if not
name.startswith('c.')]
self.compute_requirements(requirements, dependencies,
store_filenames)
for name in requires:
if name.startswith('c.'):
dependencies[name] = getattr(coefs, name[2:])
mini_app = MiniAppBase.any_from_conf(coef_name, problem, cargs)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
data = {}
for key in requires:
data[key] = dependencies[key]
val = mini_app(self.volume, data=data)
setattr(coefs, coef_name, val)
| output('...done') | sfepy.base.base.output |
# 26.02.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/elbow2.mesh'
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'verify_incompressibility',
}
field_1 = {
'name' : '3_velocity',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : '1B',
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
# Can use logical operations '&' (and), '|' (or).
region_1000 = {
'name' : 'Omega',
'select' : 'elements of group 6',
}
region_0 = {
'name' : 'Walls',
'select' : 'nodes of surface -n (r.Outlet +n r.Inlet)',
'can_cells' : False,
}
region_1 = {
'name' : 'Inlet',
'select' : 'nodes by cinc0', # In
'can_cells' : False,
}
region_2 = {
'name' : 'Outlet',
'select' : 'nodes by cinc1', # Out
'can_cells' : False,
}
ebc_1 = {
'name' : 'Walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Inlet',
'region' : 'Inlet',
'dofs' : {'u.1' : 1.0, 'u.[0,2]' : 0.0},
}
material_1 = {
'name' : 'fluid',
'values' : {
'viscosity' : 1.25e-3,
'density' : 1e0,
},
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
variable_5 = {
'name' : 'pp',
'kind' : 'parameter field',
'field' : 'pressure',
'like' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d3',
}
integral_2 = {
'name' : 'i2',
'kind' : 'v',
'quadrature' : 'gauss_o3_d3',
}
##
# Stationary Navier-Stokes equations.
equations = {
'balance' :
"""+ dw_div_grad.i2.Omega( fluid.viscosity, v, u )
+ dw_convect.i2.Omega( v, u )
- dw_stokes.i1.Omega( v, p ) = 0""",
'incompressibility' :
"""dw_stokes.i1.Omega( u, q ) = 0""",
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 5,
'eps_a' : 1e-8,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 0.99999,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
def verify_incompressibility( out, problem, state, extend = False ):
"""This hook is normally used for post-processing (additional results can
be inserted into `out` dictionary), but here we just verify the weak
incompressibility condition."""
from sfepy.base.base import Struct, debug, nm, output, assert_
vv = problem.get_variables()
one = nm.ones( (vv['p'].field.n_nod,), dtype = nm.float64 )
vv['p'].data_from_any( one )
zero = problem.evaluate('dw_stokes.i1.Omega( u, p )', p=one, u=vv['u'](),
call_mode='d_eval')
| output('div( u ) = %.3e' % zero) | sfepy.base.base.output |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = | Integral('aux', coors=qp_coors, weights=qp_weights) | sfepy.discrete.integrals.Integral |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.surface_data[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
raise ValueError('region %s has not complete faces!' % region.name)
norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
if (norm < 1e-15).any():
raise ValueError('zero nodal normal! (a node in volume?)')
normals /= norm
if return_imap:
return normals, imap
else:
return normals
def _get_edge_path(graph, seed, mask, cycle=False):
"""
Get a path in an edge graph starting with seed. The mask is incremented by
one at positions of the path vertices.
"""
if mask[seed]:
return []
path = [seed]
mask[seed] = 1
row = graph[seed].indices
nv = len(row)
while nv:
if nv == 2:
if mask[row[0]]:
if mask[row[1]]:
if cycle:
path.append(seed)
break
else:
vert = row[1]
else:
vert = row[0]
elif mask[row[0]]:
break
else:
vert = row[0]
path.append(vert)
mask[vert] = 1
row = graph[vert].indices
nv = len(row)
path = nm.array(path, dtype=nm.int32)
return path
def get_edge_paths(graph, mask):
"""
Get all edge paths in a graph with non-masked vertices. The mask is
updated.
"""
nodes = nm.unique(graph.indices)
npv = nm.diff(graph.indptr)
if npv.max() > 2:
raise ValueError('more than 2 edges sharing a vertex!')
seeds = nm.where(npv == 1)[0]
# 1. get paths.
paths = []
for seed in seeds:
path = _get_edge_path(graph, seed, mask)
if len(path):
paths.append(path)
# 2. get possible remaing cycles.
while 1:
ii = nm.where(mask[nodes] == 0)[0]
if not len(ii):
break
path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
if len(path):
paths.append(path)
return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
"""
Nodal edge directions are computed by simple averaging of direction vectors
of edges a node is contained in. Edges are assumed to be straight and a
node must be on a single edge (a border node) or shared by exactly two
edges.
"""
coors = region.domain.mesh.coors
dim = coors.shape[1]
graph = region.get_edge_graph()
imap = prepare_remap(nodes, nodes.max() + 1)
mask = nm.zeros_like(imap)
try:
paths = get_edge_paths(graph, mask)
except ValueError:
raise ValueError('more than 2 edges sharing a vertex in region %s!'
% region.name)
# All nodes must have an edge direction.
if not nm.all(mask[nodes]):
raise ValueError('region %s has not complete edges!' % region.name)
edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
for path in paths:
pcoors = coors[path]
edirs = nm.diff(pcoors, axis=0)
la.normalize_vectors(edirs, eps=1e-12)
im = imap[nm.c_[path[:-1], path[1:]]]
for ii, edir in enumerate(edirs):
edge_dirs[im[ii]] += edir
| la.normalize_vectors(edge_dirs, eps=1e-12) | sfepy.linalg.normalize_vectors |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.surface_data[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
raise ValueError('region %s has not complete faces!' % region.name)
norm = | la.norm_l2_along_axis(normals) | sfepy.linalg.norm_l2_along_axis |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.surface_data[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
raise ValueError('region %s has not complete faces!' % region.name)
norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
if (norm < 1e-15).any():
raise ValueError('zero nodal normal! (a node in volume?)')
normals /= norm
if return_imap:
return normals, imap
else:
return normals
def _get_edge_path(graph, seed, mask, cycle=False):
"""
Get a path in an edge graph starting with seed. The mask is incremented by
one at positions of the path vertices.
"""
if mask[seed]:
return []
path = [seed]
mask[seed] = 1
row = graph[seed].indices
nv = len(row)
while nv:
if nv == 2:
if mask[row[0]]:
if mask[row[1]]:
if cycle:
path.append(seed)
break
else:
vert = row[1]
else:
vert = row[0]
elif mask[row[0]]:
break
else:
vert = row[0]
path.append(vert)
mask[vert] = 1
row = graph[vert].indices
nv = len(row)
path = nm.array(path, dtype=nm.int32)
return path
def get_edge_paths(graph, mask):
"""
Get all edge paths in a graph with non-masked vertices. The mask is
updated.
"""
nodes = nm.unique(graph.indices)
npv = nm.diff(graph.indptr)
if npv.max() > 2:
raise ValueError('more than 2 edges sharing a vertex!')
seeds = nm.where(npv == 1)[0]
# 1. get paths.
paths = []
for seed in seeds:
path = _get_edge_path(graph, seed, mask)
if len(path):
paths.append(path)
# 2. get possible remaing cycles.
while 1:
ii = nm.where(mask[nodes] == 0)[0]
if not len(ii):
break
path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
if len(path):
paths.append(path)
return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
"""
Nodal edge directions are computed by simple averaging of direction vectors
of edges a node is contained in. Edges are assumed to be straight and a
node must be on a single edge (a border node) or shared by exactly two
edges.
"""
coors = region.domain.mesh.coors
dim = coors.shape[1]
graph = region.get_edge_graph()
imap = prepare_remap(nodes, nodes.max() + 1)
mask = nm.zeros_like(imap)
try:
paths = get_edge_paths(graph, mask)
except ValueError:
raise ValueError('more than 2 edges sharing a vertex in region %s!'
% region.name)
# All nodes must have an edge direction.
if not nm.all(mask[nodes]):
raise ValueError('region %s has not complete edges!' % region.name)
edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
for path in paths:
pcoors = coors[path]
edirs = nm.diff(pcoors, axis=0)
| la.normalize_vectors(edirs, eps=1e-12) | sfepy.linalg.normalize_vectors |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.surface_data[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
raise ValueError('region %s has not complete faces!' % region.name)
norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
if (norm < 1e-15).any():
raise ValueError('zero nodal normal! (a node in volume?)')
normals /= norm
if return_imap:
return normals, imap
else:
return normals
def _get_edge_path(graph, seed, mask, cycle=False):
"""
Get a path in an edge graph starting with seed. The mask is incremented by
one at positions of the path vertices.
"""
if mask[seed]:
return []
path = [seed]
mask[seed] = 1
row = graph[seed].indices
nv = len(row)
while nv:
if nv == 2:
if mask[row[0]]:
if mask[row[1]]:
if cycle:
path.append(seed)
break
else:
vert = row[1]
else:
vert = row[0]
elif mask[row[0]]:
break
else:
vert = row[0]
path.append(vert)
mask[vert] = 1
row = graph[vert].indices
nv = len(row)
path = nm.array(path, dtype=nm.int32)
return path
def get_edge_paths(graph, mask):
"""
Get all edge paths in a graph with non-masked vertices. The mask is
updated.
"""
nodes = nm.unique(graph.indices)
npv = nm.diff(graph.indptr)
if npv.max() > 2:
raise ValueError('more than 2 edges sharing a vertex!')
seeds = nm.where(npv == 1)[0]
# 1. get paths.
paths = []
for seed in seeds:
path = _get_edge_path(graph, seed, mask)
if len(path):
paths.append(path)
# 2. get possible remaing cycles.
while 1:
ii = nm.where(mask[nodes] == 0)[0]
if not len(ii):
break
path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
if len(path):
paths.append(path)
return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
"""
Nodal edge directions are computed by simple averaging of direction vectors
of edges a node is contained in. Edges are assumed to be straight and a
node must be on a single edge (a border node) or shared by exactly two
edges.
"""
coors = region.domain.mesh.coors
dim = coors.shape[1]
graph = region.get_edge_graph()
imap = prepare_remap(nodes, nodes.max() + 1)
mask = nm.zeros_like(imap)
try:
paths = get_edge_paths(graph, mask)
except ValueError:
raise ValueError('more than 2 edges sharing a vertex in region %s!'
% region.name)
# All nodes must have an edge direction.
if not nm.all(mask[nodes]):
raise ValueError('region %s has not complete edges!' % region.name)
edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
for path in paths:
pcoors = coors[path]
edirs = nm.diff(pcoors, axis=0)
la.normalize_vectors(edirs, eps=1e-12)
im = imap[nm.c_[path[:-1], path[1:]]]
for ii, edir in enumerate(edirs):
edge_dirs[im[ii]] += edir
la.normalize_vectors(edge_dirs, eps=1e-12)
if return_imap:
return edge_dirs, imap
else:
return edge_dirs
def get_min_value(dofs):
"""
Get a reasonable minimal value of DOFs suitable for extending over a
whole domain.
"""
if dofs.shape[1] > 1: # Vector.
val = 0.0
else: # Scalar.
val = dofs.min()
return val
def extend_cell_data(data, domain, rname, val=None, is_surface=False,
average_surface=True):
"""
Extend cell data defined in a region to the whole domain.
Parameters
----------
data : array
The data defined in the region.
domain : FEDomain instance
The FE domain.
rname : str
The region name.
val : float, optional
The value for filling cells not covered by the region. If not given,
the smallest value in data is used.
is_surface : bool
If True, the data are defined on a surface region. In that case the
values are averaged or summed into the cells containing the region
surface faces (a cell can have several faces of the surface), see
`average_surface`.
average_surface : bool
If True, the data defined on a surface region are averaged, otherwise
the data are summed.
Returns
-------
edata : array
The data extended to all domain elements.
"""
n_el = domain.shape.n_el
if data.shape[0] == n_el: return data
if val is None:
if data.shape[2] > 1: # Vector.
val = nm.amin(nm.abs(data))
else: # Scalar.
val = nm.amin(data)
edata = nm.empty((n_el,) + data.shape[1:], dtype=data.dtype)
edata.fill(val)
region = domain.regions[rname]
if not is_surface:
edata[region.get_cells()] = data
else:
cells = region.get_cells(true_cells_only=False)
ucells = nm.unique(cells)
if len(cells) != len(region.facets):
raise ValueError('region %s has an inner face!'
% region.name)
if average_surface:
avg = nm.bincount(cells, minlength=n_el)[ucells]
else:
avg = 1.0
for ic in range(data.shape[2]):
if nm.isrealobj(data):
evals = nm.bincount(cells, weights=data[:, 0, ic, 0],
minlength=n_el)[ucells]
else:
evals = (nm.bincount(cells, weights=data[:, 0, ic, 0].real,
minlength=n_el)[ucells]
+ 1j *
nm.bincount(cells, weights=data[:, 0, ic, 0].imag,
minlength=n_el)[ucells])
edata[ucells, 0, ic, 0] = evals / avg
return edata
def refine_mesh(filename, level):
"""
Uniformly refine `level`-times a mesh given by `filename`.
The refined mesh is saved to a file with name constructed from base
name of `filename` and `level`-times appended `'_r'` suffix.
Parameters
----------
filename : str
The mesh file name.
level : int
The refinement level.
"""
import os
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
if level > 0:
mesh = | Mesh.from_file(filename) | sfepy.discrete.fem.Mesh.from_file |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.surface_data[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
raise ValueError('region %s has not complete faces!' % region.name)
norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
if (norm < 1e-15).any():
raise ValueError('zero nodal normal! (a node in volume?)')
normals /= norm
if return_imap:
return normals, imap
else:
return normals
def _get_edge_path(graph, seed, mask, cycle=False):
"""
Get a path in an edge graph starting with seed. The mask is incremented by
one at positions of the path vertices.
"""
if mask[seed]:
return []
path = [seed]
mask[seed] = 1
row = graph[seed].indices
nv = len(row)
while nv:
if nv == 2:
if mask[row[0]]:
if mask[row[1]]:
if cycle:
path.append(seed)
break
else:
vert = row[1]
else:
vert = row[0]
elif mask[row[0]]:
break
else:
vert = row[0]
path.append(vert)
mask[vert] = 1
row = graph[vert].indices
nv = len(row)
path = nm.array(path, dtype=nm.int32)
return path
def get_edge_paths(graph, mask):
"""
Get all edge paths in a graph with non-masked vertices. The mask is
updated.
"""
nodes = nm.unique(graph.indices)
npv = nm.diff(graph.indptr)
if npv.max() > 2:
raise ValueError('more than 2 edges sharing a vertex!')
seeds = nm.where(npv == 1)[0]
# 1. get paths.
paths = []
for seed in seeds:
path = _get_edge_path(graph, seed, mask)
if len(path):
paths.append(path)
# 2. get possible remaing cycles.
while 1:
ii = nm.where(mask[nodes] == 0)[0]
if not len(ii):
break
path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
if len(path):
paths.append(path)
return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
"""
Nodal edge directions are computed by simple averaging of direction vectors
of edges a node is contained in. Edges are assumed to be straight and a
node must be on a single edge (a border node) or shared by exactly two
edges.
"""
coors = region.domain.mesh.coors
dim = coors.shape[1]
graph = region.get_edge_graph()
imap = prepare_remap(nodes, nodes.max() + 1)
mask = nm.zeros_like(imap)
try:
paths = get_edge_paths(graph, mask)
except ValueError:
raise ValueError('more than 2 edges sharing a vertex in region %s!'
% region.name)
# All nodes must have an edge direction.
if not nm.all(mask[nodes]):
raise ValueError('region %s has not complete edges!' % region.name)
edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
for path in paths:
pcoors = coors[path]
edirs = nm.diff(pcoors, axis=0)
la.normalize_vectors(edirs, eps=1e-12)
im = imap[nm.c_[path[:-1], path[1:]]]
for ii, edir in enumerate(edirs):
edge_dirs[im[ii]] += edir
la.normalize_vectors(edge_dirs, eps=1e-12)
if return_imap:
return edge_dirs, imap
else:
return edge_dirs
def get_min_value(dofs):
"""
Get a reasonable minimal value of DOFs suitable for extending over a
whole domain.
"""
if dofs.shape[1] > 1: # Vector.
val = 0.0
else: # Scalar.
val = dofs.min()
return val
def extend_cell_data(data, domain, rname, val=None, is_surface=False,
average_surface=True):
"""
Extend cell data defined in a region to the whole domain.
Parameters
----------
data : array
The data defined in the region.
domain : FEDomain instance
The FE domain.
rname : str
The region name.
val : float, optional
The value for filling cells not covered by the region. If not given,
the smallest value in data is used.
is_surface : bool
If True, the data are defined on a surface region. In that case the
values are averaged or summed into the cells containing the region
surface faces (a cell can have several faces of the surface), see
`average_surface`.
average_surface : bool
If True, the data defined on a surface region are averaged, otherwise
the data are summed.
Returns
-------
edata : array
The data extended to all domain elements.
"""
n_el = domain.shape.n_el
if data.shape[0] == n_el: return data
if val is None:
if data.shape[2] > 1: # Vector.
val = nm.amin(nm.abs(data))
else: # Scalar.
val = nm.amin(data)
edata = nm.empty((n_el,) + data.shape[1:], dtype=data.dtype)
edata.fill(val)
region = domain.regions[rname]
if not is_surface:
edata[region.get_cells()] = data
else:
cells = region.get_cells(true_cells_only=False)
ucells = nm.unique(cells)
if len(cells) != len(region.facets):
raise ValueError('region %s has an inner face!'
% region.name)
if average_surface:
avg = nm.bincount(cells, minlength=n_el)[ucells]
else:
avg = 1.0
for ic in range(data.shape[2]):
if nm.isrealobj(data):
evals = nm.bincount(cells, weights=data[:, 0, ic, 0],
minlength=n_el)[ucells]
else:
evals = (nm.bincount(cells, weights=data[:, 0, ic, 0].real,
minlength=n_el)[ucells]
+ 1j *
nm.bincount(cells, weights=data[:, 0, ic, 0].imag,
minlength=n_el)[ucells])
edata[ucells, 0, ic, 0] = evals / avg
return edata
def refine_mesh(filename, level):
"""
Uniformly refine `level`-times a mesh given by `filename`.
The refined mesh is saved to a file with name constructed from base
name of `filename` and `level`-times appended `'_r'` suffix.
Parameters
----------
filename : str
The mesh file name.
level : int
The refinement level.
"""
import os
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
if level > 0:
mesh = Mesh.from_file(filename)
domain = | FEDomain(mesh.name, mesh) | sfepy.discrete.fem.FEDomain |
from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
"""
Prepare vector for remapping range `[0, n_full]` to its subset given
by `indices`.
"""
remap = nm.empty((n_full,), dtype=nm.int32)
remap.fill(-1)
remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
return remap
def invert_remap(remap):
"""
Return the inverse of `remap`, i.e. a mapping from a sub-range
indices to a full range, see :func:`prepare_remap()`.
"""
if remap is not None:
inverse = nm.where(remap >= 0)[0].astype(nm.int32)
else:
inverse = None
return inverse
def prepare_translate(old_indices, new_indices):
"""
Prepare vector for translating `old_indices` to `new_indices`.
Returns
-------
translate : array
The translation vector. Then `new_ar = translate[old_ar]`.
"""
old_indices = nm.asarray(old_indices)
new_indices = nm.asarray(new_indices)
translate = nm.zeros(old_indices.max() + 1, dtype=new_indices.dtype)
translate[old_indices] = new_indices
return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
"""
Nodal normals are computed by simple averaging of element normals of
elements every node is contained in.
"""
dim = region.dim
field.domain.create_surface_group(region)
field.setup_surface_data(region)
# Custom integral with quadrature points in nodes.
ps = PolySpace.any_from_args('', field.gel.surface_facet,
field.approx_order)
qp_coors = ps.node_coors
# Unit normals -> weights = ones.
qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
integral = Integral('aux', coors=qp_coors, weights=qp_weights)
normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
imap = nm.empty_like(mask)
imap.fill(nodes.shape[0]) # out-of-range index for normals.
imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)
cmap, _ = field.get_mapping(region, integral, 'surface')
e_normals = cmap.normal[..., 0]
sd = field.surface_data[region.name]
econn = sd.get_connectivity()
mask[econn] += 1
# normals[imap[econn]] += e_normals
im = imap[econn]
for ii, en in enumerate(e_normals):
normals[im[ii]] += en
# All nodes must have a normal.
if not nm.all(mask[nodes] > 0):
raise ValueError('region %s has not complete faces!' % region.name)
norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
if (norm < 1e-15).any():
raise ValueError('zero nodal normal! (a node in volume?)')
normals /= norm
if return_imap:
return normals, imap
else:
return normals
def _get_edge_path(graph, seed, mask, cycle=False):
"""
Get a path in an edge graph starting with seed. The mask is incremented by
one at positions of the path vertices.
"""
if mask[seed]:
return []
path = [seed]
mask[seed] = 1
row = graph[seed].indices
nv = len(row)
while nv:
if nv == 2:
if mask[row[0]]:
if mask[row[1]]:
if cycle:
path.append(seed)
break
else:
vert = row[1]
else:
vert = row[0]
elif mask[row[0]]:
break
else:
vert = row[0]
path.append(vert)
mask[vert] = 1
row = graph[vert].indices
nv = len(row)
path = nm.array(path, dtype=nm.int32)
return path
def get_edge_paths(graph, mask):
"""
Get all edge paths in a graph with non-masked vertices. The mask is
updated.
"""
nodes = nm.unique(graph.indices)
npv = nm.diff(graph.indptr)
if npv.max() > 2:
raise ValueError('more than 2 edges sharing a vertex!')
seeds = nm.where(npv == 1)[0]
# 1. get paths.
paths = []
for seed in seeds:
path = _get_edge_path(graph, seed, mask)
if len(path):
paths.append(path)
# 2. get possible remaing cycles.
while 1:
ii = nm.where(mask[nodes] == 0)[0]
if not len(ii):
break
path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
if len(path):
paths.append(path)
return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
"""
Nodal edge directions are computed by simple averaging of direction vectors
of edges a node is contained in. Edges are assumed to be straight and a
node must be on a single edge (a border node) or shared by exactly two
edges.
"""
coors = region.domain.mesh.coors
dim = coors.shape[1]
graph = region.get_edge_graph()
imap = prepare_remap(nodes, nodes.max() + 1)
mask = nm.zeros_like(imap)
try:
paths = get_edge_paths(graph, mask)
except ValueError:
raise ValueError('more than 2 edges sharing a vertex in region %s!'
% region.name)
# All nodes must have an edge direction.
if not nm.all(mask[nodes]):
raise ValueError('region %s has not complete edges!' % region.name)
edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
for path in paths:
pcoors = coors[path]
edirs = nm.diff(pcoors, axis=0)
la.normalize_vectors(edirs, eps=1e-12)
im = imap[nm.c_[path[:-1], path[1:]]]
for ii, edir in enumerate(edirs):
edge_dirs[im[ii]] += edir
la.normalize_vectors(edge_dirs, eps=1e-12)
if return_imap:
return edge_dirs, imap
else:
return edge_dirs
def get_min_value(dofs):
"""
Get a reasonable minimal value of DOFs suitable for extending over a
whole domain.
"""
if dofs.shape[1] > 1: # Vector.
val = 0.0
else: # Scalar.
val = dofs.min()
return val
def extend_cell_data(data, domain, rname, val=None, is_surface=False,
average_surface=True):
"""
Extend cell data defined in a region to the whole domain.
Parameters
----------
data : array
The data defined in the region.
domain : FEDomain instance
The FE domain.
rname : str
The region name.
val : float, optional
The value for filling cells not covered by the region. If not given,
the smallest value in data is used.
is_surface : bool
If True, the data are defined on a surface region. In that case the
values are averaged or summed into the cells containing the region
surface faces (a cell can have several faces of the surface), see
`average_surface`.
average_surface : bool
If True, the data defined on a surface region are averaged, otherwise
the data are summed.
Returns
-------
edata : array
The data extended to all domain elements.
"""
n_el = domain.shape.n_el
if data.shape[0] == n_el: return data
if val is None:
if data.shape[2] > 1: # Vector.
val = nm.amin(nm.abs(data))
else: # Scalar.
val = nm.amin(data)
edata = nm.empty((n_el,) + data.shape[1:], dtype=data.dtype)
edata.fill(val)
region = domain.regions[rname]
if not is_surface:
edata[region.get_cells()] = data
else:
cells = region.get_cells(true_cells_only=False)
ucells = nm.unique(cells)
if len(cells) != len(region.facets):
raise ValueError('region %s has an inner face!'
% region.name)
if average_surface:
avg = nm.bincount(cells, minlength=n_el)[ucells]
else:
avg = 1.0
for ic in range(data.shape[2]):
if nm.isrealobj(data):
evals = nm.bincount(cells, weights=data[:, 0, ic, 0],
minlength=n_el)[ucells]
else:
evals = (nm.bincount(cells, weights=data[:, 0, ic, 0].real,
minlength=n_el)[ucells]
+ 1j *
nm.bincount(cells, weights=data[:, 0, ic, 0].imag,
minlength=n_el)[ucells])
edata[ucells, 0, ic, 0] = evals / avg
return edata
def refine_mesh(filename, level):
"""
Uniformly refine `level`-times a mesh given by `filename`.
The refined mesh is saved to a file with name constructed from base
name of `filename` and `level`-times appended `'_r'` suffix.
Parameters
----------
filename : str
The mesh file name.
level : int
The refinement level.
"""
import os
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
if level > 0:
mesh = Mesh.from_file(filename)
domain = FEDomain(mesh.name, mesh)
for ii in range(level):
| output('refine %d...' % ii) | sfepy.base.base.output |
import numpy as np
import itertools
import os
import scipy.linalg
from sfepy.discrete import fem
from .algo_core import generalized_courant_fischer, spring_energy_matrix_accelerate_3D
import util.geometry_util as geo_util
import util.meshgen as meshgen
from util.timer import SimpleTimer
from visualization.model_visualizer import visualize_hinges, visualize_3D
import visualization.model_visualizer as vis
from .constraints_3d import select_non_colinear_points, direction_for_relative_disallowed_motions
from .internal_structure import tetrahedron
from .stiffness_matrix import stiffness_matrix_from_mesh
class Model:
"""
Represent an assembly
"""
def __init__(self):
self.beams = []
self.joints = []
def point_matrix(self) -> np.ndarray:
beam_points = np.vstack([b.points for b in self.beams]).reshape(-1, 3)
# joint_points = np.array([j.virtual_points for j in self.joints]).reshape(-1, 3)
return np.vstack((
beam_points,
))
def point_indices(self):
beam_point_count = np.array([b.point_count for b in self.beams])
end_indices = np.cumsum(beam_point_count)
start_indices = end_indices - beam_point_count
return [np.arange(start, end) for start, end in zip(start_indices, end_indices)]
def edge_matrix(self) -> np.ndarray:
edge_indices = []
index_offset = 0
for beam in self.beams:
edge_indices.append(beam.edges + index_offset)
index_offset += beam.point_count
matrix = np.vstack([edges for edges in edge_indices if edges.size > 0])
return matrix
def constraint_matrix(self) -> np.ndarray:
matrix = []
# collect constraints for each joint and stack them
for joint in self.joints:
constraints = joint.linear_constraints(self)
matrix.append(constraints)
numpy_matrix = np.vstack(matrix) if len(matrix) > 0 else np.empty(0)
return numpy_matrix
def constraints_fixing_first_part(self):
count = len(self.beams[0].points)
fixed_coordinates = np.zeros((count * 3, self.point_count * 3))
for r, c in enumerate(range(count * 3)):
fixed_coordinates[r, c] = 1
return fixed_coordinates
@property
def point_count(self):
return sum(beam.point_count for beam in self.beams)
def add_beam(self, beam):
self.beams.append(beam)
def add_beams(self, beams):
for beam in beams:
self.add_beam(beam)
def add_joint(self, joint):
self.joints.append(joint)
def add_joints(self, joints):
for joint in joints:
self.add_joint(joint)
def beam_point_index(self, beam):
beam_index = self.beams.index(beam)
return sum(b.point_count for b in self.beams[:beam_index])
def joint_point_indices(self):
indices = []
for joint in self.joints:
offset_part_1 = self.beam_point_index(joint.part1)
offset_part_2 = self.beam_point_index(joint.part2)
indice_on_part_1 = select_non_colinear_points(joint.part1.points, near=joint.pivot_point)[1] + offset_part_1
indice_on_part_2 = select_non_colinear_points(joint.part2.points, near=joint.pivot_point)[1] + offset_part_2
indices.append((indice_on_part_1, indice_on_part_2))
return np.array(indices)
def save_json(self, filename: str, **kwargs):
import json
from util.json_encoder import ModelEncoder
with open(filename, "w") as f:
json.dump(self, f, cls=ModelEncoder, **kwargs)
def visualize(self, arrows=None, show_axis=True, show_hinge=True, arrow_style=None):
defaults = {
"length_coeff": 0.2,
"radius_coeff": 0.2,
}
if arrow_style is not None:
arrow_style = {
**defaults,
**arrow_style,
}
else:
arrow_style = defaults
geometries = []
model_mesh = vis.get_lineset_for_edges(self.point_matrix(), self.edge_matrix())
geometries.append(model_mesh)
if show_hinge:
rotation_axes_pairs = [(j.pivot, j.rotation_axes[0]) for j in self.joints if j.rotation_axes is not None]
if len(rotation_axes_pairs) > 0:
rotation_pivots, rotation_axes = zip(*rotation_axes_pairs)
axes_arrows = vis.get_mesh_for_arrows(
rotation_pivots,
geo_util.normalize(rotation_axes),
length_coeff=0.01, radius_coeff=0.4)
axes_arrows.paint_uniform_color([0.5, 0.2, 0.8])
geometries.append(axes_arrows)
translation_vector_pairs = [(j.pivot, j.translation_vectors[0]) for j in self.joints if j.translation_vectors is not None]
if len(translation_vector_pairs) > 0:
translation_pivots, translation_vector = zip(*translation_vector_pairs)
vector_arrows = vis.get_mesh_for_arrows(translation_pivots, translation_vector, length_coeff=0.01, radius_coeff=0.4)
vector_arrows.paint_uniform_color([0.2, 0.8, 0.5])
geometries.append(vector_arrows)
melded_points = [j.pivot for j in self.joints if j.translation_vectors is None and j.rotation_axes is None]
if len(melded_points) > 0:
point_meshes = vis.get_mesh_for_points(melded_points)
geometries.append(point_meshes)
mesh_frame = vis.o3d.geometry.TriangleMesh.create_coordinate_frame(size=10, origin=[0, 0, 0])
geometries.append(mesh_frame)
if arrows is not None:
points = self.point_matrix()
arrow_mesh = vis.get_mesh_for_arrows(points, arrows.reshape(-1, points.shape[1]), **arrow_style)
model_meshes = vis.get_geometries_3D(self.point_matrix(), edges=self.edge_matrix(), show_axis=False, show_point=False)
geometries.extend([arrow_mesh, *model_meshes])
vis.o3d.visualization.draw_geometries(geometries)
def joint_stiffness_matrix(self):
from functools import reduce
matrix = reduce(lambda x, y: x + y, [j.joint_stiffness(self) for j in self.joints])
return matrix
def soft_solve(self, num_pairs=-1, extra_constr=None, verbose=False):
points = self.point_matrix()
edges = self.edge_matrix()
part_stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
joint_stiffness = self.joint_stiffness_matrix()
K = part_stiffness + joint_stiffness # global stiffness
eigenpairs = geo_util.eigen(K, symmetric=True)
if verbose:
print(self.report())
if num_pairs == -1:
return [(e, v) for e, v in eigenpairs]
else:
return [(e, v) for e, v in eigenpairs[:num_pairs]]
def eigen_solve(self, num_pairs=-1, extra_constr=None, verbose=False):
points = self.point_matrix()
edges = self.edge_matrix()
timer = SimpleTimer()
stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
timer.checkpoint("K")
constraints = self.constraint_matrix()
if extra_constr is not None:
constraints = np.vstack((constraints, extra_constr))
K, B = generalized_courant_fischer(stiffness, constraints)
eigenpairs = geo_util.eigen(K, symmetric=True)
timer.checkpoint("eig")
if verbose:
print(self.report())
timer.report()
if num_pairs == -1:
return [(e, B @ v) for e, v in eigenpairs[:]]
else:
return [(e, B @ v) for e, v in eigenpairs[:num_pairs]]
def __str__(self):
return str(self.report())
def report(self) -> dict:
return {
**{
"#parts": len(self.beams),
"#points": self.point_count,
"#joints": len(self.joints),
"#constraints": len(self.constraint_matrix())
},
**vars(self)
}
class Beam:
def __init__(self, points, edges=None, principle_points=None):
if edges is None:
index_range = range(len(points))
edges = np.array(list(itertools.combinations(index_range, 2)))
self._edges = edges
self.points = points
self.principle_points = principle_points
@classmethod
def crystal(cls, p1, p2, crystal_counts):
from solvers.rigidity_solver.internal_structure import get_crystal_vertices
orient = (p2 - p1) / np.linalg.norm(p2 - p1)
crystals = [get_crystal_vertices(c, orient) for c in np.linspace(p1, p2, num=crystal_counts)]
points = np.vstack(crystals)
return Beam(points)
@classmethod
def tetra(cls, p, q, thickness=1, density=0.333333, ori=None):
points, edges = tetrahedron(p, q, thickness=thickness, density=density, ori=ori)
return Beam(points, edges, principle_points=(p, q))
@classmethod
def dense_tetra(cls, p, q, density=0.333333, thickness=1, ori=None):
points, _ = tetrahedron(p, q, density=density, thickness=thickness, ori=ori)
return Beam(points, principle_points=(p, q))
@classmethod
def vertices(cls, points, orient):
orient = orient / np.linalg.norm(orient) * 10
points = np.vstack((points, points + orient))
return Beam(points)
@classmethod
def cube_as_mesh(cls, pivot, u, v, w):
hashes = hash((tuple(pivot), tuple(u), tuple(v), tuple(w)))
soup_filename = f"data/{hashes}.stl"
mesh_filename = f"data/{hashes}.mesh"
import os
if not os.path.exists(mesh_filename):
meshgen.cube_surface_mesh(soup_filename, pivot, u, v, w)
meshgen.tetrahedralize(soup_filename, mesh_filename)
mesh = | fem.Mesh.from_file(mesh_filename) | sfepy.discrete.fem.Mesh.from_file |
import numpy as np
import itertools
import os
import scipy.linalg
from sfepy.discrete import fem
from .algo_core import generalized_courant_fischer, spring_energy_matrix_accelerate_3D
import util.geometry_util as geo_util
import util.meshgen as meshgen
from util.timer import SimpleTimer
from visualization.model_visualizer import visualize_hinges, visualize_3D
import visualization.model_visualizer as vis
from .constraints_3d import select_non_colinear_points, direction_for_relative_disallowed_motions
from .internal_structure import tetrahedron
from .stiffness_matrix import stiffness_matrix_from_mesh
class Model:
"""
Represent an assembly
"""
def __init__(self):
self.beams = []
self.joints = []
def point_matrix(self) -> np.ndarray:
beam_points = np.vstack([b.points for b in self.beams]).reshape(-1, 3)
# joint_points = np.array([j.virtual_points for j in self.joints]).reshape(-1, 3)
return np.vstack((
beam_points,
))
def point_indices(self):
beam_point_count = np.array([b.point_count for b in self.beams])
end_indices = np.cumsum(beam_point_count)
start_indices = end_indices - beam_point_count
return [np.arange(start, end) for start, end in zip(start_indices, end_indices)]
def edge_matrix(self) -> np.ndarray:
edge_indices = []
index_offset = 0
for beam in self.beams:
edge_indices.append(beam.edges + index_offset)
index_offset += beam.point_count
matrix = np.vstack([edges for edges in edge_indices if edges.size > 0])
return matrix
def constraint_matrix(self) -> np.ndarray:
matrix = []
# collect constraints for each joint and stack them
for joint in self.joints:
constraints = joint.linear_constraints(self)
matrix.append(constraints)
numpy_matrix = np.vstack(matrix) if len(matrix) > 0 else np.empty(0)
return numpy_matrix
def constraints_fixing_first_part(self):
count = len(self.beams[0].points)
fixed_coordinates = np.zeros((count * 3, self.point_count * 3))
for r, c in enumerate(range(count * 3)):
fixed_coordinates[r, c] = 1
return fixed_coordinates
@property
def point_count(self):
return sum(beam.point_count for beam in self.beams)
def add_beam(self, beam):
self.beams.append(beam)
def add_beams(self, beams):
for beam in beams:
self.add_beam(beam)
def add_joint(self, joint):
self.joints.append(joint)
def add_joints(self, joints):
for joint in joints:
self.add_joint(joint)
def beam_point_index(self, beam):
beam_index = self.beams.index(beam)
return sum(b.point_count for b in self.beams[:beam_index])
def joint_point_indices(self):
indices = []
for joint in self.joints:
offset_part_1 = self.beam_point_index(joint.part1)
offset_part_2 = self.beam_point_index(joint.part2)
indice_on_part_1 = select_non_colinear_points(joint.part1.points, near=joint.pivot_point)[1] + offset_part_1
indice_on_part_2 = select_non_colinear_points(joint.part2.points, near=joint.pivot_point)[1] + offset_part_2
indices.append((indice_on_part_1, indice_on_part_2))
return np.array(indices)
def save_json(self, filename: str, **kwargs):
import json
from util.json_encoder import ModelEncoder
with open(filename, "w") as f:
json.dump(self, f, cls=ModelEncoder, **kwargs)
def visualize(self, arrows=None, show_axis=True, show_hinge=True, arrow_style=None):
defaults = {
"length_coeff": 0.2,
"radius_coeff": 0.2,
}
if arrow_style is not None:
arrow_style = {
**defaults,
**arrow_style,
}
else:
arrow_style = defaults
geometries = []
model_mesh = vis.get_lineset_for_edges(self.point_matrix(), self.edge_matrix())
geometries.append(model_mesh)
if show_hinge:
rotation_axes_pairs = [(j.pivot, j.rotation_axes[0]) for j in self.joints if j.rotation_axes is not None]
if len(rotation_axes_pairs) > 0:
rotation_pivots, rotation_axes = zip(*rotation_axes_pairs)
axes_arrows = vis.get_mesh_for_arrows(
rotation_pivots,
geo_util.normalize(rotation_axes),
length_coeff=0.01, radius_coeff=0.4)
axes_arrows.paint_uniform_color([0.5, 0.2, 0.8])
geometries.append(axes_arrows)
translation_vector_pairs = [(j.pivot, j.translation_vectors[0]) for j in self.joints if j.translation_vectors is not None]
if len(translation_vector_pairs) > 0:
translation_pivots, translation_vector = zip(*translation_vector_pairs)
vector_arrows = vis.get_mesh_for_arrows(translation_pivots, translation_vector, length_coeff=0.01, radius_coeff=0.4)
vector_arrows.paint_uniform_color([0.2, 0.8, 0.5])
geometries.append(vector_arrows)
melded_points = [j.pivot for j in self.joints if j.translation_vectors is None and j.rotation_axes is None]
if len(melded_points) > 0:
point_meshes = vis.get_mesh_for_points(melded_points)
geometries.append(point_meshes)
mesh_frame = vis.o3d.geometry.TriangleMesh.create_coordinate_frame(size=10, origin=[0, 0, 0])
geometries.append(mesh_frame)
if arrows is not None:
points = self.point_matrix()
arrow_mesh = vis.get_mesh_for_arrows(points, arrows.reshape(-1, points.shape[1]), **arrow_style)
model_meshes = vis.get_geometries_3D(self.point_matrix(), edges=self.edge_matrix(), show_axis=False, show_point=False)
geometries.extend([arrow_mesh, *model_meshes])
vis.o3d.visualization.draw_geometries(geometries)
def joint_stiffness_matrix(self):
from functools import reduce
matrix = reduce(lambda x, y: x + y, [j.joint_stiffness(self) for j in self.joints])
return matrix
def soft_solve(self, num_pairs=-1, extra_constr=None, verbose=False):
points = self.point_matrix()
edges = self.edge_matrix()
part_stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
joint_stiffness = self.joint_stiffness_matrix()
K = part_stiffness + joint_stiffness # global stiffness
eigenpairs = geo_util.eigen(K, symmetric=True)
if verbose:
print(self.report())
if num_pairs == -1:
return [(e, v) for e, v in eigenpairs]
else:
return [(e, v) for e, v in eigenpairs[:num_pairs]]
def eigen_solve(self, num_pairs=-1, extra_constr=None, verbose=False):
points = self.point_matrix()
edges = self.edge_matrix()
timer = SimpleTimer()
stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
timer.checkpoint("K")
constraints = self.constraint_matrix()
if extra_constr is not None:
constraints = np.vstack((constraints, extra_constr))
K, B = generalized_courant_fischer(stiffness, constraints)
eigenpairs = geo_util.eigen(K, symmetric=True)
timer.checkpoint("eig")
if verbose:
print(self.report())
timer.report()
if num_pairs == -1:
return [(e, B @ v) for e, v in eigenpairs[:]]
else:
return [(e, B @ v) for e, v in eigenpairs[:num_pairs]]
def __str__(self):
return str(self.report())
def report(self) -> dict:
return {
**{
"#parts": len(self.beams),
"#points": self.point_count,
"#joints": len(self.joints),
"#constraints": len(self.constraint_matrix())
},
**vars(self)
}
class Beam:
def __init__(self, points, edges=None, principle_points=None):
if edges is None:
index_range = range(len(points))
edges = np.array(list(itertools.combinations(index_range, 2)))
self._edges = edges
self.points = points
self.principle_points = principle_points
@classmethod
def crystal(cls, p1, p2, crystal_counts):
from solvers.rigidity_solver.internal_structure import get_crystal_vertices
orient = (p2 - p1) / np.linalg.norm(p2 - p1)
crystals = [get_crystal_vertices(c, orient) for c in np.linspace(p1, p2, num=crystal_counts)]
points = np.vstack(crystals)
return Beam(points)
@classmethod
def tetra(cls, p, q, thickness=1, density=0.333333, ori=None):
points, edges = tetrahedron(p, q, thickness=thickness, density=density, ori=ori)
return Beam(points, edges, principle_points=(p, q))
@classmethod
def dense_tetra(cls, p, q, density=0.333333, thickness=1, ori=None):
points, _ = tetrahedron(p, q, density=density, thickness=thickness, ori=ori)
return Beam(points, principle_points=(p, q))
@classmethod
def vertices(cls, points, orient):
orient = orient / np.linalg.norm(orient) * 10
points = np.vstack((points, points + orient))
return Beam(points)
@classmethod
def cube_as_mesh(cls, pivot, u, v, w):
hashes = hash((tuple(pivot), tuple(u), tuple(v), tuple(w)))
soup_filename = f"data/{hashes}.stl"
mesh_filename = f"data/{hashes}.mesh"
import os
if not os.path.exists(mesh_filename):
meshgen.cube_surface_mesh(soup_filename, pivot, u, v, w)
meshgen.tetrahedralize(soup_filename, mesh_filename)
mesh = fem.Mesh.from_file(mesh_filename)
points = mesh.coors
nonzero_x, nonzero_y = mesh.create_conn_graph().nonzero()
edges = np.hstack((nonzero_x.reshape(-1, 1), nonzero_y.reshape(-1, 1)))
beam = Beam(points, edges)
beam.stiffness = stiffness_matrix_from_mesh(mesh_filename)
beam.mesh_filename = mesh_filename
return beam
@classmethod
def from_soup_file(cls, soup_filename: str):
mesh_filename = soup_filename.replace(".obj", ".mesh")
if not os.path.exists(mesh_filename):
meshgen.tetrahedralize(soup_filename, mesh_filename)
beam = cls.from_mesh_file(mesh_filename)
return beam
@classmethod
def from_mesh_file(cls, mesh_filename):
mesh = | fem.Mesh.from_file(mesh_filename) | sfepy.discrete.fem.Mesh.from_file |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = | tn.get_trace(a_full, sym_storage=False) | sfepy.mechanics.tensors.get_trace |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = | tn.get_trace(a_sym, sym_storage=True) | sfepy.mechanics.tensors.get_trace |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
ok = ok and nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = | tn.get_volumetric_tensor(a_full, sym_storage=False) | sfepy.mechanics.tensors.get_volumetric_tensor |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
ok = ok and nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = | tn.get_volumetric_tensor(a_sym, sym_storage=True) | sfepy.mechanics.tensors.get_volumetric_tensor |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
ok = ok and nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = | tn.get_deviator(a_full, sym_storage=False) | sfepy.mechanics.tensors.get_deviator |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
ok = ok and nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = tn.get_deviator(a_full, sym_storage=False)
_ok = nm.allclose(dev, _dev_full, rtol=0.0, atol=1e-14)
self.report('deviator full: %s' % _ok)
ok = ok and _ok
aux = (dev * nm.transpose(dev, (0, 2, 1))).sum(axis=1).sum(axis=1)
vms2 = nm.sqrt((3.0/2.0) * aux)[:,None]
dev = | tn.get_deviator(a_sym, sym_storage=True) | sfepy.mechanics.tensors.get_deviator |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
ok = ok and nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = tn.get_deviator(a_full, sym_storage=False)
_ok = nm.allclose(dev, _dev_full, rtol=0.0, atol=1e-14)
self.report('deviator full: %s' % _ok)
ok = ok and _ok
aux = (dev * nm.transpose(dev, (0, 2, 1))).sum(axis=1).sum(axis=1)
vms2 = nm.sqrt((3.0/2.0) * aux)[:,None]
dev = tn.get_deviator(a_sym, sym_storage=True)
_ok = nm.allclose(dev, _dev_sym, rtol=0.0, atol=1e-14)
self.report('deviator sym: %s' % _ok)
ok = ok and _ok
vms = | tn.get_von_mises_stress(a_full, sym_storage=False) | sfepy.mechanics.tensors.get_von_mises_stress |
from sfepy.base.testing import TestCommon, assert_, debug
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
return Test(conf=conf, options=options)
def test_tensors(self):
import numpy as nm
import sfepy.mechanics.tensors as tn
ok = True
a_full = 2.0 * nm.ones((5,3,3), dtype=nm.float64)
a_sym = 2.0 * nm.ones((5,6), dtype=nm.float64)
_tr = nm.array([6.0] * 5, dtype=nm.float64)
_vt_full = 2.0 * nm.tile(nm.eye(3, dtype=nm.float64), (5,1,1))
_vt_sym = nm.tile(nm.array([2, 2, 2, 0, 0, 0], dtype=nm.float64),
(5,1,1))
_dev_full = a_full - _vt_full
_dev_sym = a_sym - _vt_sym
_vms = 6.0 * nm.ones((5,1), dtype=nm.float64)
tr = tn.get_trace(a_full, sym_storage=False)
_ok = nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace full: %s' % _ok)
ok = ok and _ok
tr = tn.get_trace(a_sym, sym_storage=True)
ok = ok and nm.allclose(tr, _tr, rtol=0.0, atol=1e-14)
self.report('trace sym: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_full, sym_storage=False)
_ok = nm.allclose(vt, _vt_full, rtol=0.0, atol=1e-14)
self.report('volumetric tensor full: %s' % _ok)
ok = ok and _ok
vt = tn.get_volumetric_tensor(a_sym, sym_storage=True)
_ok = nm.allclose(vt, _vt_sym, rtol=0.0, atol=1e-14)
self.report('volumetric tensor sym: %s' % _ok)
ok = ok and _ok
dev = tn.get_deviator(a_full, sym_storage=False)
_ok = nm.allclose(dev, _dev_full, rtol=0.0, atol=1e-14)
self.report('deviator full: %s' % _ok)
ok = ok and _ok
aux = (dev * nm.transpose(dev, (0, 2, 1))).sum(axis=1).sum(axis=1)
vms2 = nm.sqrt((3.0/2.0) * aux)[:,None]
dev = tn.get_deviator(a_sym, sym_storage=True)
_ok = nm.allclose(dev, _dev_sym, rtol=0.0, atol=1e-14)
self.report('deviator sym: %s' % _ok)
ok = ok and _ok
vms = tn.get_von_mises_stress(a_full, sym_storage=False)
_ok = nm.allclose(vms, _vms, rtol=0.0, atol=1e-14)
self.report('von Mises stress full: %s' % _ok)
ok = ok and _ok
vms = | tn.get_von_mises_stress(a_sym, sym_storage=True) | sfepy.mechanics.tensors.get_von_mises_stress |
# AtrialFibrePlugin
# Copyright (C) 2018 <NAME>, King's College London, all rights reserved, see LICENSE file
'''
Atrial fibre generation plugin.
'''
import os
import stat
import ast
import shutil
import datetime
import zipfile
import warnings
from itertools import starmap
from collections import defaultdict
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from sfepy.base.conf import ProblemConf
from sfepy.applications import solve_pde
from sfepy.base.base import output
except ImportError:
warnings.warn('SfePy needs to be installed or in PYTHONPATH to generate fiber directions.')
from eidolon import ( ui,
ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType,
listToMatrix,MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush
)
import eidolon
import numpy as np
from scipy.spatial import cKDTree
plugindir= os.path.dirname(os.path.abspath(__file__)) # this file's directory
# directory/file names
uifile=os.path.join(plugindir,'AtrialFibrePlugin.ui')
deformDir=os.path.join(plugindir,'deformetricaC')
deformExe=os.path.join(deformDir,'deformetrica')
architecture=os.path.join(plugindir,'architecture.ini')
problemFile=os.path.join(plugindir,'problemfile.py')
# registration file names
decimatedFile='subject.vtk'
targetFile='target.vtk'
datasetFile='data_set.xml'
modelFile='model.xml'
optimFile='optimization_parameters.xml'
registeredFile='Registration_subject_to_subject_0__t_9.vtk'
decimate='decimate-surface'
# deformetrica parameters
kernelWidthSub=5000
kernelWidthDef=8000
kernelType='cudaexact'
dataSigma=0.1
stepSize=0.000001
# field names
#regionField='regions'
#landmarkField='landmarks'
#directionField='directions'
#gradientDirField='gradientDirs'
#elemDirField='elemDirs'
#elemRegionField='elemRegions'
#elemthickness='elemThickness'
#elemGradient='elemGradient'
fieldNames=eidolon.enum(
'regions','landmarks',
'directions','gradientDirs',
'elemDirs','elemRegions','elemThickness',
'nodeGradient'
)
objNames=eidolon.enum(
'atlasmesh',
'origmesh',
'epimesh','epinodes',
'endomesh','endonodes',
'architecture'
)
regTypes=eidolon.enum('endo','epi')
# load the UI file into the ui namespace, this is subtyped below
ui.loadUI(open(uifile).read())
def showLines(nodes,lines,name='Lines',matname='Default'):
mgr=eidolon.getSceneMgr()
lineds=eidolon.LineDataSet(name+'DS',nodes,lines)
obj=MeshSceneObject(name,lineds)
mgr.addSceneObject(obj)
rep=obj.createRepr(eidolon.ReprType._line,matname=matname)
mgr.addSceneObjectRepr(rep)
return obj,rep
def showElemDirs(obj,glyphscale,mgr):
ds=obj.datasets[0]
nodes=ds.getNodes()
tets=first(i for i in ds.enumIndexSets() if i.getType()==ElemType._Tet1NL)
elemdirfield=ds.getDataField(fieldNames._elemDirs)
mid=ElemType.Tet1NL.basis(0.25,0.25,0.25)
elemnodes=[ElemType.Tet1NL.applyCoeffs([nodes[e] for e in elem],mid) for elem in tets]
elemobj=MeshSceneObject('elemobj',PyDataSet('elemobjds',elemnodes,[],[elemdirfield]))
mgr.addSceneObject(elemobj)
rep=elemobj.createRepr(eidolon.ReprType._glyph,0,externalOnly=False,drawInternal=True,glyphname='arrow',
glyphscale=glyphscale,dfield=elemdirfield.getName(),vecfunc=eidolon.VecFunc._Linear)
mgr.addSceneObjectRepr(rep)
class initdict(defaultdict):
def __init__(self,initfunc,*args,**kwargs):
defaultdict.__init__(self,None,*args,**kwargs)
self.initfunc=initfunc
def __missing__(self,key):
value=self.initfunc(key)
self.__setitem__(key,value)
return value
class plane(object):
def __init__(self,center,norm):
self.center=center
self.norm=norm.norm()
def dist(self,pt):
return pt.planeDist(self.center,self.norm)
def moveUp(self,dist):
self.center+=self.norm*dist
def numPointsAbove(self,nodes):
return sum(1 for n in nodes if self.dist(n)>=0)
def between(self,nodes,otherplane):
numnodes=len(nodes)
return self.numPointsAbove(nodes)==numnodes and otherplane.numPointsAbove(nodes)==numnodes
def findIntersects(self,nodes,inds):
numnodes=inds.m()
result=[]
for n in range(inds.n()):
if 0<self.numPointsAbove(nodes.mapIndexRow(inds,n))<numnodes:
result.append(n)
return result
class TriMeshGraph(object):
def __init__(self,nodes,tris,ocdepth=3):
self.nodes=nodes if isinstance(nodes,eidolon.Vec3Matrix) else listToMatrix(nodes,'nodes')
self.tris=tris if isinstance(tris,eidolon.IndexMatrix) else listToMatrix(tris,'tris')
self.tricenters=[avg(self.getTriNodes(r),vec3()) for r in range(self.tris.n())]
self.adj,self.ragged=generateTriAdj(self.tris) # elem -> elems
self.nodeelem=generateNodeElemMap(self.nodes.n(),self.tris) # node -> elems
self.edges=generateSimplexEdgeMap(self.nodes.n(),self.tris) # node -> nodes
self.boundbox=BoundBox(nodes)
# self.octree=eidolon.Octree(ocdepth,self.boundbox.getDimensions(),self.boundbox.center)
# self.octree.addMesh(self.nodes,self.tris)
def computeDist(key):
i,j=key
return self.tricenters[i].distTo(self.tricenters[j])
self.tridists=initdict(computeDist)
# def getIntersectedTri(self,start,end):
# '''
# Returns the triangle index and the (t,u,v) triple if the line from `start' to `end' intersects the indexed triangle
# at a distance of `t' from `start' with xi coord (u,v). Returns None if no triangle intersected.
# '''
# startoc=self.octree.getLeaf(start)
# endoc=self.octree.getLeaf(end)
# inds=(startoc.leafdata if startoc is not None else []) + (endoc.leafdata if endoc is not None else [])
#
# r=eidolon.Ray(start,end-start)
#
# for tri in inds:
# d=r.intersectsTri(*self.getTriNodes(tri))
# if d:# and d[0]<=start.distTo(end):
# return tri,d
#
# return None
def getPathSubgraph(self,starttri,endtri):
return getAdjTo(self.adj,starttri,endtri)
def getTriNodes(self,triindex):
return self.nodes.mapIndexRow(self.tris,triindex)
def getTriNorm(self,triindex):
a,b,c=self.getTriNodes(triindex)
return a.planeNorm(b,c)
def getSharedNodeTris(self,triindex):
tris=set()
for n in self.tris[triindex]:
tris.update(self.nodeelem[n])
tris.remove(triindex)
return list(sorted(tris))
def getNearestTri(self,pt):
def triDist(tri):
norm=self.getTriNorm(tri)
pt=self.tricenters[tri]
return abs(pt.planeDist(pt,norm))
nearestnode=min([n for n in range(self.nodes.n()) if self.nodeelem[n]],key=lambda n:self.nodes[n].distToSq(pt))
tris=self.nodeelem[nearestnode]
return min(tris,key=triDist)
def getPath(self,starttri,endtri,acceptTri=None):
return dijkstra(self.adj,starttri,endtri,lambda i,j:self.tridists[(i,j)],acceptTri)
def loadArchitecture(path,section):
'''
Load the architecture from the given file `path' and return values from the given section (endo or epi). The
return value is a tuple containing:
landmarks: 0-based indices of landmark nodes in the atlas
lmlines : 0-based index pairs defining lines between indices in landmarks
lmregions: a list of maps, each map defining a region which are mappings from a 0-based indices into lmlines to
the line's landmark index pair
lmstim : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines stimulation
lground : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines ground
appendageregion: region number for the appendage
appendagenode: node index for the appendage's division node which must be generated
'''
c=configparser.SafeConfigParser()
assert len(c.read(path))>0
landmarks=ast.literal_eval(c.get(section,'landmarks')) # 0-based node indices
lines=ast.literal_eval(c.get(section,'lines')) # 1-based landmark indices
regions=ast.literal_eval(c.get(section,'regions')) # 1-based landmark indices
stimulus=ast.literal_eval(c.get(section,'stimulus')) # per region
ground=ast.literal_eval(c.get(section,'ground')) # per region
appendageregion=ast.literal_eval(c.get(section,'appendageregion'))
appendagenode=ast.literal_eval(c.get(section,'appendagenode'))
appendagelmindex=ast.literal_eval(c.get(section,'appendagelmindex'))
# types=ast.literal_eval(c.get(section,'type')) # per region
# indices that don't exist are for landmarks that need to be calculated
lmlines=[subone(l) for l in lines ]#if max(l)<=len(landmarks)] # filter for lines with existing node indices
lmregions=[subone(r) for r in regions]
# lmregions=[subone(r) for r in regions if all(i<=len(landmarks) for i in r)]
lmstim=stimulus#[:len(lmregions)]
lmground=ground#[:len(lmregions)]
allregions=[]
for r in lmregions:
lr={i:(a,b) for i,(a,b) in enumerate(lmlines) if a in r and b in r}
if len(lr)>2:
allregions.append(lr)
return landmarks,lmlines,allregions,lmstim,lmground, appendageregion,appendagenode,appendagelmindex
def writeMeshFile(filename,nodes,inds,nodegroup,indgroup,dim):
'''Write a medit format mesh file to `filename'.'''
with open(filename,'w') as o:
print('MeshVersionFormatted 1',file=o)
print('Dimension %i'%dim,file=o)
print('Vertices',file=o)
print(len(nodes),file=o)
for n in range(len(nodes)):
for v in tuple(nodes[n])[:dim]:
print('%20.10f'%v,end=' ',file=o)
group=0 if nodegroup is None else nodegroup[n]
print(group,file=o)
print('Triangles' if len(inds[0])==3 else 'Tetrahedra',file=o)
print(len(inds),file=o)
for n in range(len(inds)):
print(*['%10i'%(t+1) for t in inds[n]],file=o,end=' ')
group=0 if indgroup is None else indgroup[n]
print(group,file=o)
def createNodeQuery(nodes):
'''
Create a cKDTree object from `nodes' and return a query function which accepts a position and radius value. The
function will return the nearest point index to the given position if radius<=0 and a list of indices of points
within the given radius of the position otherwise. The node list is also returned as a second return value.
'''
tree=cKDTree(np.asarray(list(map(tuple,nodes))))
def _query(pos,radius=0):
'''Query `nodes' for the nearest node to `pos' if `radius'<=0 or a list of those within `radius' otherwise.'''
pos=tuple(pos)
if radius<=0:
return tree.query(pos)[1],tree.data
else:
return tree.query_ball_point(pos,radius),tree.data
return _query
def registerSubjectToTarget(subjectObj,targetObj,targetTrans,outdir,decimpath,VTK):
'''
Register the `subjectObj' mesh object to `targetObj' mesh object putting data into directory `outdir'. The subject
will be decimated to have roughly the same number of nodes as the target and then stored as subject.vtk in `outdir'.
Registration is done with Deformetrica and result stored as 'Registration_subject_to_subject_0__t_9.vtk' in `outdir'.
If `targetTrans' must be None or a transform which is applied to the nodes of `targetObj' before registration.
'''
dpath=os.path.join(outdir,decimatedFile)
tmpfile=os.path.join(outdir,'tmp.vtk')
# if a transform is given, apply that transform to the target mesh when saving otherwise do no transformation
vecfunc=(lambda i: tuple(targetTrans*i)) if targetTrans else None
shutil.copy(os.path.join(deformDir,datasetFile),os.path.join(outdir,datasetFile)) # copy dataset file unchanged
model=open(os.path.join(deformDir,modelFile)).read()
model=model.replace('%1',str(dataSigma))
model=model.replace('%2',str(kernelWidthSub))
model=model.replace('%3',str(kernelType))
model=model.replace('%4',str(kernelWidthDef))
with open(os.path.join(outdir,modelFile),'w') as o: # save modified model file
o.write(model)
optim=open(os.path.join(deformDir,optimFile)).read()
optim=optim.replace('%1',str(stepSize))
with open(os.path.join(outdir,optimFile),'w') as o: # save modified optimization file
o.write(optim)
VTK.saveLegacyFile(tmpfile,subjectObj,datasettype='POLYDATA')
VTK.saveLegacyFile(os.path.join(outdir,targetFile),targetObj,datasettype='POLYDATA',vecfunc=vecfunc)
snodes=subjectObj.datasets[0].getNodes()
tnodes=targetObj.datasets[0].getNodes()
sizeratio=float(tnodes.n())/snodes.n()
sizepercent=str(100*(1-sizeratio))[:6] # percent to decimate by
# decimate the mesh most of the way towards having the same number of nodes as the target
ret,output=eidolon.execBatchProgram(decimpath,tmpfile,dpath,'-reduceby',sizepercent,'-ascii',logcmd=True)
assert ret==0,output
env={'LD_LIBRARY_PATH':deformDir}
ret,output=eidolon.execBatchProgram(deformExe,"registration", "3D", modelFile, datasetFile, optimFile, "--output-dir=.",cwd=outdir,env=env,logcmd=True)
assert ret==0,output
return output
def transferLandmarks(archFilename,fieldname,targetObj,targetTrans,subjectObj,outdir,VTK):
'''
Register the landmarks defined as node indices on `targetObj' to equivalent node indices on `subjectObj' via the
decimated and registered intermediary stored in `outdir'. The result is a list of index pairs associating a node
index in `subjectObj' for every landmark index in `targetObj'.
'''
decimated=os.path.join(outdir,decimatedFile)
registered=os.path.join(outdir,registeredFile)
arch=loadArchitecture(archFilename,fieldname)
lmarks,lines=arch[:2]
appendagelmindex=arch[-1]
# append the index for the estimated appendage node, this will have to be adjusted manually after registration
lmarks.append(appendagelmindex)
reg=VTK.loadObject(registered) # mesh registered to target
dec=VTK.loadObject(decimated) # decimated unregistered mesh
tnodes=targetObj.datasets[0].getNodes() # target points
rnodes=reg.datasets[0].getNodes() # registered decimated points
dnodes=dec.datasets[0].getNodes() # unregistered decimated points
snodes=subjectObj.datasets[0].getNodes() # original subject points
targetTrans=targetTrans or eidolon.transform()
lmpoints=[(targetTrans*tnodes[m],m) for m in lmarks] # (transformed landmark node, index) pairs
# find the points in the registered mesh closest to the landmark points in the target object
query=createNodeQuery(rnodes)
rpoints=[(query(pt)[0],m) for pt,m in lmpoints]
# find the subject nodes closes to landmark points in the decimated mesh (which are at the same indices as in the registered mesh)
query=createNodeQuery(snodes)
spoints=[(query(dnodes[i])[0],m) for i,m in rpoints]
assert len(spoints)==len(lmpoints)
assert all(p[0] is not None for p in spoints)
slines=[l for l in lines if max(l)<len(spoints)]
return spoints,slines # return list (i,m) pairs where node index i in the subject mesh is landmark m
def generateTriAdj(tris):
'''
Generates a table (n,3) giving the indices of adjacent triangles for each triangle, with a value of `n' indicating a
free edge. The indices in each row are in sorted order rather than per triangle edge. The result is the dual of the
triangle mesh represented as the (n,3) array and a map relating the mesh's ragged edges to their triangle.
'''
edgemap = {} # maps edges to the first triangle having that edge
result=IndexMatrix(tris.getName()+'Adj',tris.n(),3)
result.fill(tris.n())
# Find adjacent triangles by constructing a map from edges defined by points (a,b) to the triangle having that edge,
# when that edge is encountered twice then the current triangle is adjacent to the one that originally added the edge.
for t1,tri in enumerate(tris): # iterate over each triangle t1
for a,b in successive(tri,2,True): # iterate over each edge (a,b) of t1
k=(min(a,b),max(a,b)) # key has uniform edge order
t2=edgemap.pop(k,None) # attempt to find edge k in the map, None indicates edge not found
if t2 is not None: # an edge is shared if already encountered, thus t1 is adjacent to t2
result[t1]=sorted(set(result[t1]+(t2,)))
result[t2]=sorted(set(result[t2]+(t1,)))
else:
edgemap[k]=t1 # first time edge is encountered, associate this triangle with it
return result,edgemap
@timing
def getAdjTo(adj,start,end):
'''
Returns a subgraph of `adj',represented as a node->[neighbours] dict, which includes nodes `start' and `end'.
If `end' is None or an index not appearing in the mesh, the result will be the submesh contiguous with `start'.
'''
visiting=set([start])
found={}
numnodes=adj.n()
while visiting and end not in found:
visit=visiting.pop()
neighbours=[n for n in adj[visit] if n<numnodes]
found[visit]=neighbours
visiting.update(n for n in neighbours if n not in found)
return found
def generateNodeElemMap(numnodes,tris):
'''Returns a list relating each node index to the set of element indices using that node.'''
nodemap=[set() for _ in range(numnodes)]
for i,tri in enumerate(tris):
for n in tri:
nodemap[n].add(i)
#assert all(len(s)>0 for s in nodemap), 'Unused nodes in triangle topology'
return nodemap
def generateSimplexEdgeMap(numnodes,simplices):
'''
Returns a list relating each node index to the set of node indices joined to it by graph edges. This assumes the mesh
has `numnodes' number of nodes and simplex topology `simplices'.
'''
nodemap=[set() for _ in range(numnodes)]
for simplex in simplices:
simplex=set(simplex)
for s in simplex:
nodemap[s].update(simplex.difference((s,)))
return nodemap
@timing
def dijkstra(adj, start, end,distFunc,acceptTri=None):
#http://benalexkeen.com/implementing-djikstras-shortest-path-algorithm-with-python/
# shortest paths is a dict of nodes to previous node and distance
paths = {start: (None,0)}
curnode = start
visited = set()
# consider only subgraph containing start and end, this expands geometrically so should contain the minimal path
adj=getAdjTo(adj,start,end)
eidolon.printFlush(len(adj))
if acceptTri is not None:
accept=lambda a: (a in adj and acceptTri(a))
else:
accept=lambda a: a in adj
while curnode != end:
visited.add(curnode)
destinations = list(filter(accept,adj[curnode]))
curweight = paths[curnode][1]
for dest in destinations:
weight = curweight+distFunc(curnode,dest)
if dest not in paths or weight < paths[dest][1]:
paths[dest] = (curnode, weight)
nextnodes = {node: paths[node] for node in paths if node not in visited}
if not nextnodes:
raise ValueError("Route %i -> %i not possible"%(start,end))
# next node is the destination with the lowest weight
curnode = min(nextnodes, key=lambda k:nextnodes[k][1])
# collect path from end node back to the start
path = []
while curnode is not None:
path.insert(0,curnode)
curnode = paths[curnode][0]
return path
def subone(v):
return tuple(i-1 for i in v)
def findNearestIndex(pt,nodelist):
return min(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def findFarthestIndex(pt,nodelist):
return max(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def getContiguousTris(graph,starttri,acceptTri):
accepted=[starttri]
adjacent=first(i for i in graph.getSharedNodeTris(starttri) if i not in accepted and acceptTri(i))
while adjacent is not None:
accepted.append(adjacent)
for a in accepted[::-1]:
allneighbours=graph.getSharedNodeTris(a)
adjacent=first(i for i in allneighbours if i not in accepted and acceptTri(i))
if adjacent:
break
return accepted
@timing
def findTrisBetweenNodes(start,end,landmarks,graph):
eidolon.printFlush('Nodes:',start,end)
start=landmarks[start]
end=landmarks[end]
assert 0<=start<len(graph.nodeelem)
assert 0<=end<len(graph.nodeelem)
starttri=first(graph.nodeelem[start])
endtri=first(graph.nodeelem[end])
assert starttri is not None
assert endtri is not None
nodes=graph.nodes
startnode=nodes[start]
endnode=nodes[end]
easypath= graph.getPath(starttri,endtri)
midnode=graph.tricenters[easypath[len(easypath)//2]]
# define planes to bound the areas to search for triangles to within the space of the line
splane=plane(startnode,midnode-startnode)
eplane=plane(endnode,midnode-endnode)
# adjust the plane's positions to account for numeric error
adjustdist=1e1
splane.moveUp(-adjustdist)
eplane.moveUp(-adjustdist)
assert starttri is not None
assert endtri is not None
# TODO: plane normal determination still needs work
#linenorm=midnode.planeNorm(startnode,endnode)
#linenorm=graph.getTriNorm(easypath[len(easypath)//2]).cross(midnode-startnode)
linenorm=eidolon.avg(graph.getTriNorm(e) for e in easypath).cross(midnode-startnode)
lineplane=plane(splane.center,linenorm)
indices=set([starttri,endtri]) # list of element indices on lineplane between splane and eplane
for i in range(graph.tris.n()):
trinodes=graph.getTriNodes(i)
numabove=lineplane.numPointsAbove(trinodes)
if numabove in (1,2) and splane.between(trinodes,eplane):
indices.add(i)
accepted=getContiguousTris(graph,starttri,lambda i:i in indices)
if endtri not in accepted or len(easypath)<len(accepted):
eidolon.printFlush('---Resorting to easypath')
accepted=easypath
return accepted
@timing
def assignRegion(region,index,assignmat,landmarks,linemap,graph):
def getEnclosedGraph(adj,excludes,start):
visiting=set([start])
found=set()
numnodes=adj.n()
assert start is not None
while visiting:
visit=visiting.pop()
neighbours=[n for n in adj.getRow(visit) if n<numnodes and n not in excludes]
found.add(visit)
visiting.update(n for n in neighbours if n not in found)
return found
# collect all tri indices on the border of this region
bordertris=set()
for lineindex,(a,b) in region.items():
if (a,b) in linemap:
line=linemap[(a,b)]
else:
line=findTrisBetweenNodes(a,b,landmarks,graph)
linemap[(a,b)]=line
linemap[(b,a)]=line
# assign line ID to triangles on the line
for tri in line:
assignmat[tri,0]=lineindex
bordertris.update(line)
bordertri=graph.tricenters[first(bordertris)]
farthest=max(range(len(graph.tris)),key=lambda i:graph.tricenters[i].distToSq(bordertri))
maxgraph=getEnclosedGraph(graph.adj,bordertris,farthest)
for tri in range(len(graph.tris)):
if tri in bordertris or tri not in maxgraph:
if assignmat[tri,1]<0:
assignmat[tri,1]=index
elif assignmat[tri,2]<0:
assignmat[tri,2]=index
elif assignmat[tri,3]<0:
assignmat[tri,3]=index
@timing
def generateRegionField(obj,landmarkObj,regions,appendageregion,appendagenode,task=None):
ds=obj.datasets[0]
nodes=ds.getNodes()
tris=first(ind for ind in ds.enumIndexSets() if ind.m()==3 and bool(ind.meta(StdProps._isspatial)))
lmnodes=landmarkObj.datasets[0].getNodes()
linemap={}
landmarks={i:nodes.indexOf(lm)[0] for i,lm in enumerate(lmnodes)}
assert all(0<=l<nodes.n() for l in landmarks)
graph=TriMeshGraph(nodes,tris)
edgenodeinds=set(eidolon.listSum(graph.ragged)) # list of all node indices on the ragged edge
filledregions=RealMatrix(fieldNames._regions,tris.n(),4)
filledregions.meta(StdProps._elemdata,'True')
filledregions.fill(-10)
#landmarks[appendagenode]=0 # TODO: skipping appendage node for now
for region in regions:
for a,b in region.values():
if appendagenode not in (a,b):
if a in landmarks and b not in landmarks:
oldlmnode=nodes[landmarks[a]]
newlm=b
elif b in landmarks and a not in landmarks:
oldlmnode=nodes[landmarks[b]]
newlm=a
else:
continue
newlmnode=min(edgenodeinds,key=lambda i:nodes[i].distToSq(oldlmnode)) # ragged edge node closest to landmark
landmarks[newlm]=newlmnode
# eidolon.printFlush(newlm,newlmnode,graph.getPath(min(a,b),newlmnode),'\n')
# line=findTrisBetweenNodes(a,b,landmarks,graph)
# for tri in line:
# filledregions[tri,0]=max(a,b)
if task:
task.setMaxProgress(len(regions))
for rindex,region in enumerate(regions):
eidolon.printFlush('Region',rindex,'of',len(regions),region)
allnodes=set(eidolon.listSum(region.values()))
if all(a in landmarks for a in allnodes):
assignRegion(region,rindex,filledregions,landmarks,linemap,graph)
else:
eidolon.printFlush('Skipping',rindex,[a for a in allnodes if a not in landmarks])
if task:
task.setProgress(rindex+1)
return filledregions,linemap
def extractTriRegion(nodes,tris,acceptFunc):
'''
Extract the region from the mesh (nodes,tris) as defined by the triangle acceptance function `acceptFunc'. The return
value is a tuple containing the list of new nodes, a list of new tris, a map from old node indices in `nodes' to new
indices in the returned node list, and a map from triangle indices in `tris' to new ones in the returned triangle list.
'''
#old -> new
newnodes=[] # new node set
newtris=[] # new triangle set
nodemap={} # maps old node indices to new
trimap={} # maps old triangle indices to new
for tri in range(len(tris)):
if acceptFunc(tri):
newtri=list(tris[tri])
for i,n in enumerate(newtri):
if n not in nodemap:
nodemap[n]=len(newnodes)
newnodes.append(nodes[n])
newtri[i]=nodemap[n]
trimap[tri]=len(newtris)
newtris.append(newtri)
return newnodes,newtris,nodemap,trimap
def calculateMeshGradient(prefix,nodes,elems,groups,VTK):
'''Calculate the laplace gradient for the mesh given as (nodes,elems,groups) using sfepy.'''
tempdir=os.path.dirname(prefix)
infile=prefix+'.mesh'
logfile=prefix+'.log'
outfile=prefix+'.vtk'
probfile=prefix+'.py'
writeMeshFile(infile,nodes,elems,groups,None,3)
with open(problemFile) as p:
with open(probfile,'w') as o:
o.write(p.read()%{'inputfile':infile,'outdir':tempdir})
p= | ProblemConf.from_file(probfile) | sfepy.base.conf.ProblemConf.from_file |
# AtrialFibrePlugin
# Copyright (C) 2018 <NAME>, King's College London, all rights reserved, see LICENSE file
'''
Atrial fibre generation plugin.
'''
import os
import stat
import ast
import shutil
import datetime
import zipfile
import warnings
from itertools import starmap
from collections import defaultdict
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from sfepy.base.conf import ProblemConf
from sfepy.applications import solve_pde
from sfepy.base.base import output
except ImportError:
warnings.warn('SfePy needs to be installed or in PYTHONPATH to generate fiber directions.')
from eidolon import ( ui,
ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType,
listToMatrix,MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush
)
import eidolon
import numpy as np
from scipy.spatial import cKDTree
plugindir= os.path.dirname(os.path.abspath(__file__)) # this file's directory
# directory/file names
uifile=os.path.join(plugindir,'AtrialFibrePlugin.ui')
deformDir=os.path.join(plugindir,'deformetricaC')
deformExe=os.path.join(deformDir,'deformetrica')
architecture=os.path.join(plugindir,'architecture.ini')
problemFile=os.path.join(plugindir,'problemfile.py')
# registration file names
decimatedFile='subject.vtk'
targetFile='target.vtk'
datasetFile='data_set.xml'
modelFile='model.xml'
optimFile='optimization_parameters.xml'
registeredFile='Registration_subject_to_subject_0__t_9.vtk'
decimate='decimate-surface'
# deformetrica parameters
kernelWidthSub=5000
kernelWidthDef=8000
kernelType='cudaexact'
dataSigma=0.1
stepSize=0.000001
# field names
#regionField='regions'
#landmarkField='landmarks'
#directionField='directions'
#gradientDirField='gradientDirs'
#elemDirField='elemDirs'
#elemRegionField='elemRegions'
#elemthickness='elemThickness'
#elemGradient='elemGradient'
fieldNames=eidolon.enum(
'regions','landmarks',
'directions','gradientDirs',
'elemDirs','elemRegions','elemThickness',
'nodeGradient'
)
objNames=eidolon.enum(
'atlasmesh',
'origmesh',
'epimesh','epinodes',
'endomesh','endonodes',
'architecture'
)
regTypes=eidolon.enum('endo','epi')
# load the UI file into the ui namespace, this is subtyped below
ui.loadUI(open(uifile).read())
def showLines(nodes,lines,name='Lines',matname='Default'):
mgr=eidolon.getSceneMgr()
lineds=eidolon.LineDataSet(name+'DS',nodes,lines)
obj=MeshSceneObject(name,lineds)
mgr.addSceneObject(obj)
rep=obj.createRepr(eidolon.ReprType._line,matname=matname)
mgr.addSceneObjectRepr(rep)
return obj,rep
def showElemDirs(obj,glyphscale,mgr):
ds=obj.datasets[0]
nodes=ds.getNodes()
tets=first(i for i in ds.enumIndexSets() if i.getType()==ElemType._Tet1NL)
elemdirfield=ds.getDataField(fieldNames._elemDirs)
mid=ElemType.Tet1NL.basis(0.25,0.25,0.25)
elemnodes=[ElemType.Tet1NL.applyCoeffs([nodes[e] for e in elem],mid) for elem in tets]
elemobj=MeshSceneObject('elemobj',PyDataSet('elemobjds',elemnodes,[],[elemdirfield]))
mgr.addSceneObject(elemobj)
rep=elemobj.createRepr(eidolon.ReprType._glyph,0,externalOnly=False,drawInternal=True,glyphname='arrow',
glyphscale=glyphscale,dfield=elemdirfield.getName(),vecfunc=eidolon.VecFunc._Linear)
mgr.addSceneObjectRepr(rep)
class initdict(defaultdict):
def __init__(self,initfunc,*args,**kwargs):
defaultdict.__init__(self,None,*args,**kwargs)
self.initfunc=initfunc
def __missing__(self,key):
value=self.initfunc(key)
self.__setitem__(key,value)
return value
class plane(object):
def __init__(self,center,norm):
self.center=center
self.norm=norm.norm()
def dist(self,pt):
return pt.planeDist(self.center,self.norm)
def moveUp(self,dist):
self.center+=self.norm*dist
def numPointsAbove(self,nodes):
return sum(1 for n in nodes if self.dist(n)>=0)
def between(self,nodes,otherplane):
numnodes=len(nodes)
return self.numPointsAbove(nodes)==numnodes and otherplane.numPointsAbove(nodes)==numnodes
def findIntersects(self,nodes,inds):
numnodes=inds.m()
result=[]
for n in range(inds.n()):
if 0<self.numPointsAbove(nodes.mapIndexRow(inds,n))<numnodes:
result.append(n)
return result
class TriMeshGraph(object):
def __init__(self,nodes,tris,ocdepth=3):
self.nodes=nodes if isinstance(nodes,eidolon.Vec3Matrix) else listToMatrix(nodes,'nodes')
self.tris=tris if isinstance(tris,eidolon.IndexMatrix) else listToMatrix(tris,'tris')
self.tricenters=[avg(self.getTriNodes(r),vec3()) for r in range(self.tris.n())]
self.adj,self.ragged=generateTriAdj(self.tris) # elem -> elems
self.nodeelem=generateNodeElemMap(self.nodes.n(),self.tris) # node -> elems
self.edges=generateSimplexEdgeMap(self.nodes.n(),self.tris) # node -> nodes
self.boundbox=BoundBox(nodes)
# self.octree=eidolon.Octree(ocdepth,self.boundbox.getDimensions(),self.boundbox.center)
# self.octree.addMesh(self.nodes,self.tris)
def computeDist(key):
i,j=key
return self.tricenters[i].distTo(self.tricenters[j])
self.tridists=initdict(computeDist)
# def getIntersectedTri(self,start,end):
# '''
# Returns the triangle index and the (t,u,v) triple if the line from `start' to `end' intersects the indexed triangle
# at a distance of `t' from `start' with xi coord (u,v). Returns None if no triangle intersected.
# '''
# startoc=self.octree.getLeaf(start)
# endoc=self.octree.getLeaf(end)
# inds=(startoc.leafdata if startoc is not None else []) + (endoc.leafdata if endoc is not None else [])
#
# r=eidolon.Ray(start,end-start)
#
# for tri in inds:
# d=r.intersectsTri(*self.getTriNodes(tri))
# if d:# and d[0]<=start.distTo(end):
# return tri,d
#
# return None
def getPathSubgraph(self,starttri,endtri):
return getAdjTo(self.adj,starttri,endtri)
def getTriNodes(self,triindex):
return self.nodes.mapIndexRow(self.tris,triindex)
def getTriNorm(self,triindex):
a,b,c=self.getTriNodes(triindex)
return a.planeNorm(b,c)
def getSharedNodeTris(self,triindex):
tris=set()
for n in self.tris[triindex]:
tris.update(self.nodeelem[n])
tris.remove(triindex)
return list(sorted(tris))
def getNearestTri(self,pt):
def triDist(tri):
norm=self.getTriNorm(tri)
pt=self.tricenters[tri]
return abs(pt.planeDist(pt,norm))
nearestnode=min([n for n in range(self.nodes.n()) if self.nodeelem[n]],key=lambda n:self.nodes[n].distToSq(pt))
tris=self.nodeelem[nearestnode]
return min(tris,key=triDist)
def getPath(self,starttri,endtri,acceptTri=None):
return dijkstra(self.adj,starttri,endtri,lambda i,j:self.tridists[(i,j)],acceptTri)
def loadArchitecture(path,section):
'''
Load the architecture from the given file `path' and return values from the given section (endo or epi). The
return value is a tuple containing:
landmarks: 0-based indices of landmark nodes in the atlas
lmlines : 0-based index pairs defining lines between indices in landmarks
lmregions: a list of maps, each map defining a region which are mappings from a 0-based indices into lmlines to
the line's landmark index pair
lmstim : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines stimulation
lground : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines ground
appendageregion: region number for the appendage
appendagenode: node index for the appendage's division node which must be generated
'''
c=configparser.SafeConfigParser()
assert len(c.read(path))>0
landmarks=ast.literal_eval(c.get(section,'landmarks')) # 0-based node indices
lines=ast.literal_eval(c.get(section,'lines')) # 1-based landmark indices
regions=ast.literal_eval(c.get(section,'regions')) # 1-based landmark indices
stimulus=ast.literal_eval(c.get(section,'stimulus')) # per region
ground=ast.literal_eval(c.get(section,'ground')) # per region
appendageregion=ast.literal_eval(c.get(section,'appendageregion'))
appendagenode=ast.literal_eval(c.get(section,'appendagenode'))
appendagelmindex=ast.literal_eval(c.get(section,'appendagelmindex'))
# types=ast.literal_eval(c.get(section,'type')) # per region
# indices that don't exist are for landmarks that need to be calculated
lmlines=[subone(l) for l in lines ]#if max(l)<=len(landmarks)] # filter for lines with existing node indices
lmregions=[subone(r) for r in regions]
# lmregions=[subone(r) for r in regions if all(i<=len(landmarks) for i in r)]
lmstim=stimulus#[:len(lmregions)]
lmground=ground#[:len(lmregions)]
allregions=[]
for r in lmregions:
lr={i:(a,b) for i,(a,b) in enumerate(lmlines) if a in r and b in r}
if len(lr)>2:
allregions.append(lr)
return landmarks,lmlines,allregions,lmstim,lmground, appendageregion,appendagenode,appendagelmindex
def writeMeshFile(filename,nodes,inds,nodegroup,indgroup,dim):
'''Write a medit format mesh file to `filename'.'''
with open(filename,'w') as o:
print('MeshVersionFormatted 1',file=o)
print('Dimension %i'%dim,file=o)
print('Vertices',file=o)
print(len(nodes),file=o)
for n in range(len(nodes)):
for v in tuple(nodes[n])[:dim]:
print('%20.10f'%v,end=' ',file=o)
group=0 if nodegroup is None else nodegroup[n]
print(group,file=o)
print('Triangles' if len(inds[0])==3 else 'Tetrahedra',file=o)
print(len(inds),file=o)
for n in range(len(inds)):
print(*['%10i'%(t+1) for t in inds[n]],file=o,end=' ')
group=0 if indgroup is None else indgroup[n]
print(group,file=o)
def createNodeQuery(nodes):
'''
Create a cKDTree object from `nodes' and return a query function which accepts a position and radius value. The
function will return the nearest point index to the given position if radius<=0 and a list of indices of points
within the given radius of the position otherwise. The node list is also returned as a second return value.
'''
tree=cKDTree(np.asarray(list(map(tuple,nodes))))
def _query(pos,radius=0):
'''Query `nodes' for the nearest node to `pos' if `radius'<=0 or a list of those within `radius' otherwise.'''
pos=tuple(pos)
if radius<=0:
return tree.query(pos)[1],tree.data
else:
return tree.query_ball_point(pos,radius),tree.data
return _query
def registerSubjectToTarget(subjectObj,targetObj,targetTrans,outdir,decimpath,VTK):
'''
Register the `subjectObj' mesh object to `targetObj' mesh object putting data into directory `outdir'. The subject
will be decimated to have roughly the same number of nodes as the target and then stored as subject.vtk in `outdir'.
Registration is done with Deformetrica and result stored as 'Registration_subject_to_subject_0__t_9.vtk' in `outdir'.
If `targetTrans' must be None or a transform which is applied to the nodes of `targetObj' before registration.
'''
dpath=os.path.join(outdir,decimatedFile)
tmpfile=os.path.join(outdir,'tmp.vtk')
# if a transform is given, apply that transform to the target mesh when saving otherwise do no transformation
vecfunc=(lambda i: tuple(targetTrans*i)) if targetTrans else None
shutil.copy(os.path.join(deformDir,datasetFile),os.path.join(outdir,datasetFile)) # copy dataset file unchanged
model=open(os.path.join(deformDir,modelFile)).read()
model=model.replace('%1',str(dataSigma))
model=model.replace('%2',str(kernelWidthSub))
model=model.replace('%3',str(kernelType))
model=model.replace('%4',str(kernelWidthDef))
with open(os.path.join(outdir,modelFile),'w') as o: # save modified model file
o.write(model)
optim=open(os.path.join(deformDir,optimFile)).read()
optim=optim.replace('%1',str(stepSize))
with open(os.path.join(outdir,optimFile),'w') as o: # save modified optimization file
o.write(optim)
VTK.saveLegacyFile(tmpfile,subjectObj,datasettype='POLYDATA')
VTK.saveLegacyFile(os.path.join(outdir,targetFile),targetObj,datasettype='POLYDATA',vecfunc=vecfunc)
snodes=subjectObj.datasets[0].getNodes()
tnodes=targetObj.datasets[0].getNodes()
sizeratio=float(tnodes.n())/snodes.n()
sizepercent=str(100*(1-sizeratio))[:6] # percent to decimate by
# decimate the mesh most of the way towards having the same number of nodes as the target
ret,output=eidolon.execBatchProgram(decimpath,tmpfile,dpath,'-reduceby',sizepercent,'-ascii',logcmd=True)
assert ret==0,output
env={'LD_LIBRARY_PATH':deformDir}
ret,output=eidolon.execBatchProgram(deformExe,"registration", "3D", modelFile, datasetFile, optimFile, "--output-dir=.",cwd=outdir,env=env,logcmd=True)
assert ret==0,output
return output
def transferLandmarks(archFilename,fieldname,targetObj,targetTrans,subjectObj,outdir,VTK):
'''
Register the landmarks defined as node indices on `targetObj' to equivalent node indices on `subjectObj' via the
decimated and registered intermediary stored in `outdir'. The result is a list of index pairs associating a node
index in `subjectObj' for every landmark index in `targetObj'.
'''
decimated=os.path.join(outdir,decimatedFile)
registered=os.path.join(outdir,registeredFile)
arch=loadArchitecture(archFilename,fieldname)
lmarks,lines=arch[:2]
appendagelmindex=arch[-1]
# append the index for the estimated appendage node, this will have to be adjusted manually after registration
lmarks.append(appendagelmindex)
reg=VTK.loadObject(registered) # mesh registered to target
dec=VTK.loadObject(decimated) # decimated unregistered mesh
tnodes=targetObj.datasets[0].getNodes() # target points
rnodes=reg.datasets[0].getNodes() # registered decimated points
dnodes=dec.datasets[0].getNodes() # unregistered decimated points
snodes=subjectObj.datasets[0].getNodes() # original subject points
targetTrans=targetTrans or eidolon.transform()
lmpoints=[(targetTrans*tnodes[m],m) for m in lmarks] # (transformed landmark node, index) pairs
# find the points in the registered mesh closest to the landmark points in the target object
query=createNodeQuery(rnodes)
rpoints=[(query(pt)[0],m) for pt,m in lmpoints]
# find the subject nodes closes to landmark points in the decimated mesh (which are at the same indices as in the registered mesh)
query=createNodeQuery(snodes)
spoints=[(query(dnodes[i])[0],m) for i,m in rpoints]
assert len(spoints)==len(lmpoints)
assert all(p[0] is not None for p in spoints)
slines=[l for l in lines if max(l)<len(spoints)]
return spoints,slines # return list (i,m) pairs where node index i in the subject mesh is landmark m
def generateTriAdj(tris):
'''
Generates a table (n,3) giving the indices of adjacent triangles for each triangle, with a value of `n' indicating a
free edge. The indices in each row are in sorted order rather than per triangle edge. The result is the dual of the
triangle mesh represented as the (n,3) array and a map relating the mesh's ragged edges to their triangle.
'''
edgemap = {} # maps edges to the first triangle having that edge
result=IndexMatrix(tris.getName()+'Adj',tris.n(),3)
result.fill(tris.n())
# Find adjacent triangles by constructing a map from edges defined by points (a,b) to the triangle having that edge,
# when that edge is encountered twice then the current triangle is adjacent to the one that originally added the edge.
for t1,tri in enumerate(tris): # iterate over each triangle t1
for a,b in successive(tri,2,True): # iterate over each edge (a,b) of t1
k=(min(a,b),max(a,b)) # key has uniform edge order
t2=edgemap.pop(k,None) # attempt to find edge k in the map, None indicates edge not found
if t2 is not None: # an edge is shared if already encountered, thus t1 is adjacent to t2
result[t1]=sorted(set(result[t1]+(t2,)))
result[t2]=sorted(set(result[t2]+(t1,)))
else:
edgemap[k]=t1 # first time edge is encountered, associate this triangle with it
return result,edgemap
@timing
def getAdjTo(adj,start,end):
'''
Returns a subgraph of `adj',represented as a node->[neighbours] dict, which includes nodes `start' and `end'.
If `end' is None or an index not appearing in the mesh, the result will be the submesh contiguous with `start'.
'''
visiting=set([start])
found={}
numnodes=adj.n()
while visiting and end not in found:
visit=visiting.pop()
neighbours=[n for n in adj[visit] if n<numnodes]
found[visit]=neighbours
visiting.update(n for n in neighbours if n not in found)
return found
def generateNodeElemMap(numnodes,tris):
'''Returns a list relating each node index to the set of element indices using that node.'''
nodemap=[set() for _ in range(numnodes)]
for i,tri in enumerate(tris):
for n in tri:
nodemap[n].add(i)
#assert all(len(s)>0 for s in nodemap), 'Unused nodes in triangle topology'
return nodemap
def generateSimplexEdgeMap(numnodes,simplices):
'''
Returns a list relating each node index to the set of node indices joined to it by graph edges. This assumes the mesh
has `numnodes' number of nodes and simplex topology `simplices'.
'''
nodemap=[set() for _ in range(numnodes)]
for simplex in simplices:
simplex=set(simplex)
for s in simplex:
nodemap[s].update(simplex.difference((s,)))
return nodemap
@timing
def dijkstra(adj, start, end,distFunc,acceptTri=None):
#http://benalexkeen.com/implementing-djikstras-shortest-path-algorithm-with-python/
# shortest paths is a dict of nodes to previous node and distance
paths = {start: (None,0)}
curnode = start
visited = set()
# consider only subgraph containing start and end, this expands geometrically so should contain the minimal path
adj=getAdjTo(adj,start,end)
eidolon.printFlush(len(adj))
if acceptTri is not None:
accept=lambda a: (a in adj and acceptTri(a))
else:
accept=lambda a: a in adj
while curnode != end:
visited.add(curnode)
destinations = list(filter(accept,adj[curnode]))
curweight = paths[curnode][1]
for dest in destinations:
weight = curweight+distFunc(curnode,dest)
if dest not in paths or weight < paths[dest][1]:
paths[dest] = (curnode, weight)
nextnodes = {node: paths[node] for node in paths if node not in visited}
if not nextnodes:
raise ValueError("Route %i -> %i not possible"%(start,end))
# next node is the destination with the lowest weight
curnode = min(nextnodes, key=lambda k:nextnodes[k][1])
# collect path from end node back to the start
path = []
while curnode is not None:
path.insert(0,curnode)
curnode = paths[curnode][0]
return path
def subone(v):
return tuple(i-1 for i in v)
def findNearestIndex(pt,nodelist):
return min(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def findFarthestIndex(pt,nodelist):
return max(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def getContiguousTris(graph,starttri,acceptTri):
accepted=[starttri]
adjacent=first(i for i in graph.getSharedNodeTris(starttri) if i not in accepted and acceptTri(i))
while adjacent is not None:
accepted.append(adjacent)
for a in accepted[::-1]:
allneighbours=graph.getSharedNodeTris(a)
adjacent=first(i for i in allneighbours if i not in accepted and acceptTri(i))
if adjacent:
break
return accepted
@timing
def findTrisBetweenNodes(start,end,landmarks,graph):
eidolon.printFlush('Nodes:',start,end)
start=landmarks[start]
end=landmarks[end]
assert 0<=start<len(graph.nodeelem)
assert 0<=end<len(graph.nodeelem)
starttri=first(graph.nodeelem[start])
endtri=first(graph.nodeelem[end])
assert starttri is not None
assert endtri is not None
nodes=graph.nodes
startnode=nodes[start]
endnode=nodes[end]
easypath= graph.getPath(starttri,endtri)
midnode=graph.tricenters[easypath[len(easypath)//2]]
# define planes to bound the areas to search for triangles to within the space of the line
splane=plane(startnode,midnode-startnode)
eplane=plane(endnode,midnode-endnode)
# adjust the plane's positions to account for numeric error
adjustdist=1e1
splane.moveUp(-adjustdist)
eplane.moveUp(-adjustdist)
assert starttri is not None
assert endtri is not None
# TODO: plane normal determination still needs work
#linenorm=midnode.planeNorm(startnode,endnode)
#linenorm=graph.getTriNorm(easypath[len(easypath)//2]).cross(midnode-startnode)
linenorm=eidolon.avg(graph.getTriNorm(e) for e in easypath).cross(midnode-startnode)
lineplane=plane(splane.center,linenorm)
indices=set([starttri,endtri]) # list of element indices on lineplane between splane and eplane
for i in range(graph.tris.n()):
trinodes=graph.getTriNodes(i)
numabove=lineplane.numPointsAbove(trinodes)
if numabove in (1,2) and splane.between(trinodes,eplane):
indices.add(i)
accepted=getContiguousTris(graph,starttri,lambda i:i in indices)
if endtri not in accepted or len(easypath)<len(accepted):
eidolon.printFlush('---Resorting to easypath')
accepted=easypath
return accepted
@timing
def assignRegion(region,index,assignmat,landmarks,linemap,graph):
def getEnclosedGraph(adj,excludes,start):
visiting=set([start])
found=set()
numnodes=adj.n()
assert start is not None
while visiting:
visit=visiting.pop()
neighbours=[n for n in adj.getRow(visit) if n<numnodes and n not in excludes]
found.add(visit)
visiting.update(n for n in neighbours if n not in found)
return found
# collect all tri indices on the border of this region
bordertris=set()
for lineindex,(a,b) in region.items():
if (a,b) in linemap:
line=linemap[(a,b)]
else:
line=findTrisBetweenNodes(a,b,landmarks,graph)
linemap[(a,b)]=line
linemap[(b,a)]=line
# assign line ID to triangles on the line
for tri in line:
assignmat[tri,0]=lineindex
bordertris.update(line)
bordertri=graph.tricenters[first(bordertris)]
farthest=max(range(len(graph.tris)),key=lambda i:graph.tricenters[i].distToSq(bordertri))
maxgraph=getEnclosedGraph(graph.adj,bordertris,farthest)
for tri in range(len(graph.tris)):
if tri in bordertris or tri not in maxgraph:
if assignmat[tri,1]<0:
assignmat[tri,1]=index
elif assignmat[tri,2]<0:
assignmat[tri,2]=index
elif assignmat[tri,3]<0:
assignmat[tri,3]=index
@timing
def generateRegionField(obj,landmarkObj,regions,appendageregion,appendagenode,task=None):
ds=obj.datasets[0]
nodes=ds.getNodes()
tris=first(ind for ind in ds.enumIndexSets() if ind.m()==3 and bool(ind.meta(StdProps._isspatial)))
lmnodes=landmarkObj.datasets[0].getNodes()
linemap={}
landmarks={i:nodes.indexOf(lm)[0] for i,lm in enumerate(lmnodes)}
assert all(0<=l<nodes.n() for l in landmarks)
graph=TriMeshGraph(nodes,tris)
edgenodeinds=set(eidolon.listSum(graph.ragged)) # list of all node indices on the ragged edge
filledregions=RealMatrix(fieldNames._regions,tris.n(),4)
filledregions.meta(StdProps._elemdata,'True')
filledregions.fill(-10)
#landmarks[appendagenode]=0 # TODO: skipping appendage node for now
for region in regions:
for a,b in region.values():
if appendagenode not in (a,b):
if a in landmarks and b not in landmarks:
oldlmnode=nodes[landmarks[a]]
newlm=b
elif b in landmarks and a not in landmarks:
oldlmnode=nodes[landmarks[b]]
newlm=a
else:
continue
newlmnode=min(edgenodeinds,key=lambda i:nodes[i].distToSq(oldlmnode)) # ragged edge node closest to landmark
landmarks[newlm]=newlmnode
# eidolon.printFlush(newlm,newlmnode,graph.getPath(min(a,b),newlmnode),'\n')
# line=findTrisBetweenNodes(a,b,landmarks,graph)
# for tri in line:
# filledregions[tri,0]=max(a,b)
if task:
task.setMaxProgress(len(regions))
for rindex,region in enumerate(regions):
eidolon.printFlush('Region',rindex,'of',len(regions),region)
allnodes=set(eidolon.listSum(region.values()))
if all(a in landmarks for a in allnodes):
assignRegion(region,rindex,filledregions,landmarks,linemap,graph)
else:
eidolon.printFlush('Skipping',rindex,[a for a in allnodes if a not in landmarks])
if task:
task.setProgress(rindex+1)
return filledregions,linemap
def extractTriRegion(nodes,tris,acceptFunc):
'''
Extract the region from the mesh (nodes,tris) as defined by the triangle acceptance function `acceptFunc'. The return
value is a tuple containing the list of new nodes, a list of new tris, a map from old node indices in `nodes' to new
indices in the returned node list, and a map from triangle indices in `tris' to new ones in the returned triangle list.
'''
#old -> new
newnodes=[] # new node set
newtris=[] # new triangle set
nodemap={} # maps old node indices to new
trimap={} # maps old triangle indices to new
for tri in range(len(tris)):
if acceptFunc(tri):
newtri=list(tris[tri])
for i,n in enumerate(newtri):
if n not in nodemap:
nodemap[n]=len(newnodes)
newnodes.append(nodes[n])
newtri[i]=nodemap[n]
trimap[tri]=len(newtris)
newtris.append(newtri)
return newnodes,newtris,nodemap,trimap
def calculateMeshGradient(prefix,nodes,elems,groups,VTK):
'''Calculate the laplace gradient for the mesh given as (nodes,elems,groups) using sfepy.'''
tempdir=os.path.dirname(prefix)
infile=prefix+'.mesh'
logfile=prefix+'.log'
outfile=prefix+'.vtk'
probfile=prefix+'.py'
writeMeshFile(infile,nodes,elems,groups,None,3)
with open(problemFile) as p:
with open(probfile,'w') as o:
o.write(p.read()%{'inputfile':infile,'outdir':tempdir})
p=ProblemConf.from_file(probfile)
| output.set_output(logfile,True,True) | sfepy.base.base.output.set_output |
# AtrialFibrePlugin
# Copyright (C) 2018 <NAME>, King's College London, all rights reserved, see LICENSE file
'''
Atrial fibre generation plugin.
'''
import os
import stat
import ast
import shutil
import datetime
import zipfile
import warnings
from itertools import starmap
from collections import defaultdict
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from sfepy.base.conf import ProblemConf
from sfepy.applications import solve_pde
from sfepy.base.base import output
except ImportError:
warnings.warn('SfePy needs to be installed or in PYTHONPATH to generate fiber directions.')
from eidolon import ( ui,
ScenePlugin, Project, avg, vec3, successive, first, RealMatrix, IndexMatrix, StdProps, timing, ReprType,
listToMatrix,MeshSceneObject, BoundBox, ElemType, reduceMesh, PyDataSet, taskmethod, taskroutine, printFlush
)
import eidolon
import numpy as np
from scipy.spatial import cKDTree
plugindir= os.path.dirname(os.path.abspath(__file__)) # this file's directory
# directory/file names
uifile=os.path.join(plugindir,'AtrialFibrePlugin.ui')
deformDir=os.path.join(plugindir,'deformetricaC')
deformExe=os.path.join(deformDir,'deformetrica')
architecture=os.path.join(plugindir,'architecture.ini')
problemFile=os.path.join(plugindir,'problemfile.py')
# registration file names
decimatedFile='subject.vtk'
targetFile='target.vtk'
datasetFile='data_set.xml'
modelFile='model.xml'
optimFile='optimization_parameters.xml'
registeredFile='Registration_subject_to_subject_0__t_9.vtk'
decimate='decimate-surface'
# deformetrica parameters
kernelWidthSub=5000
kernelWidthDef=8000
kernelType='cudaexact'
dataSigma=0.1
stepSize=0.000001
# field names
#regionField='regions'
#landmarkField='landmarks'
#directionField='directions'
#gradientDirField='gradientDirs'
#elemDirField='elemDirs'
#elemRegionField='elemRegions'
#elemthickness='elemThickness'
#elemGradient='elemGradient'
fieldNames=eidolon.enum(
'regions','landmarks',
'directions','gradientDirs',
'elemDirs','elemRegions','elemThickness',
'nodeGradient'
)
objNames=eidolon.enum(
'atlasmesh',
'origmesh',
'epimesh','epinodes',
'endomesh','endonodes',
'architecture'
)
regTypes=eidolon.enum('endo','epi')
# load the UI file into the ui namespace, this is subtyped below
ui.loadUI(open(uifile).read())
def showLines(nodes,lines,name='Lines',matname='Default'):
mgr=eidolon.getSceneMgr()
lineds=eidolon.LineDataSet(name+'DS',nodes,lines)
obj=MeshSceneObject(name,lineds)
mgr.addSceneObject(obj)
rep=obj.createRepr(eidolon.ReprType._line,matname=matname)
mgr.addSceneObjectRepr(rep)
return obj,rep
def showElemDirs(obj,glyphscale,mgr):
ds=obj.datasets[0]
nodes=ds.getNodes()
tets=first(i for i in ds.enumIndexSets() if i.getType()==ElemType._Tet1NL)
elemdirfield=ds.getDataField(fieldNames._elemDirs)
mid=ElemType.Tet1NL.basis(0.25,0.25,0.25)
elemnodes=[ElemType.Tet1NL.applyCoeffs([nodes[e] for e in elem],mid) for elem in tets]
elemobj=MeshSceneObject('elemobj',PyDataSet('elemobjds',elemnodes,[],[elemdirfield]))
mgr.addSceneObject(elemobj)
rep=elemobj.createRepr(eidolon.ReprType._glyph,0,externalOnly=False,drawInternal=True,glyphname='arrow',
glyphscale=glyphscale,dfield=elemdirfield.getName(),vecfunc=eidolon.VecFunc._Linear)
mgr.addSceneObjectRepr(rep)
class initdict(defaultdict):
def __init__(self,initfunc,*args,**kwargs):
defaultdict.__init__(self,None,*args,**kwargs)
self.initfunc=initfunc
def __missing__(self,key):
value=self.initfunc(key)
self.__setitem__(key,value)
return value
class plane(object):
def __init__(self,center,norm):
self.center=center
self.norm=norm.norm()
def dist(self,pt):
return pt.planeDist(self.center,self.norm)
def moveUp(self,dist):
self.center+=self.norm*dist
def numPointsAbove(self,nodes):
return sum(1 for n in nodes if self.dist(n)>=0)
def between(self,nodes,otherplane):
numnodes=len(nodes)
return self.numPointsAbove(nodes)==numnodes and otherplane.numPointsAbove(nodes)==numnodes
def findIntersects(self,nodes,inds):
numnodes=inds.m()
result=[]
for n in range(inds.n()):
if 0<self.numPointsAbove(nodes.mapIndexRow(inds,n))<numnodes:
result.append(n)
return result
class TriMeshGraph(object):
def __init__(self,nodes,tris,ocdepth=3):
self.nodes=nodes if isinstance(nodes,eidolon.Vec3Matrix) else listToMatrix(nodes,'nodes')
self.tris=tris if isinstance(tris,eidolon.IndexMatrix) else listToMatrix(tris,'tris')
self.tricenters=[avg(self.getTriNodes(r),vec3()) for r in range(self.tris.n())]
self.adj,self.ragged=generateTriAdj(self.tris) # elem -> elems
self.nodeelem=generateNodeElemMap(self.nodes.n(),self.tris) # node -> elems
self.edges=generateSimplexEdgeMap(self.nodes.n(),self.tris) # node -> nodes
self.boundbox=BoundBox(nodes)
# self.octree=eidolon.Octree(ocdepth,self.boundbox.getDimensions(),self.boundbox.center)
# self.octree.addMesh(self.nodes,self.tris)
def computeDist(key):
i,j=key
return self.tricenters[i].distTo(self.tricenters[j])
self.tridists=initdict(computeDist)
# def getIntersectedTri(self,start,end):
# '''
# Returns the triangle index and the (t,u,v) triple if the line from `start' to `end' intersects the indexed triangle
# at a distance of `t' from `start' with xi coord (u,v). Returns None if no triangle intersected.
# '''
# startoc=self.octree.getLeaf(start)
# endoc=self.octree.getLeaf(end)
# inds=(startoc.leafdata if startoc is not None else []) + (endoc.leafdata if endoc is not None else [])
#
# r=eidolon.Ray(start,end-start)
#
# for tri in inds:
# d=r.intersectsTri(*self.getTriNodes(tri))
# if d:# and d[0]<=start.distTo(end):
# return tri,d
#
# return None
def getPathSubgraph(self,starttri,endtri):
return getAdjTo(self.adj,starttri,endtri)
def getTriNodes(self,triindex):
return self.nodes.mapIndexRow(self.tris,triindex)
def getTriNorm(self,triindex):
a,b,c=self.getTriNodes(triindex)
return a.planeNorm(b,c)
def getSharedNodeTris(self,triindex):
tris=set()
for n in self.tris[triindex]:
tris.update(self.nodeelem[n])
tris.remove(triindex)
return list(sorted(tris))
def getNearestTri(self,pt):
def triDist(tri):
norm=self.getTriNorm(tri)
pt=self.tricenters[tri]
return abs(pt.planeDist(pt,norm))
nearestnode=min([n for n in range(self.nodes.n()) if self.nodeelem[n]],key=lambda n:self.nodes[n].distToSq(pt))
tris=self.nodeelem[nearestnode]
return min(tris,key=triDist)
def getPath(self,starttri,endtri,acceptTri=None):
return dijkstra(self.adj,starttri,endtri,lambda i,j:self.tridists[(i,j)],acceptTri)
def loadArchitecture(path,section):
'''
Load the architecture from the given file `path' and return values from the given section (endo or epi). The
return value is a tuple containing:
landmarks: 0-based indices of landmark nodes in the atlas
lmlines : 0-based index pairs defining lines between indices in landmarks
lmregions: a list of maps, each map defining a region which are mappings from a 0-based indices into lmlines to
the line's landmark index pair
lmstim : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines stimulation
lground : a per-region specifier list stating which lines (L# for index #) or atlas node (N#) defines ground
appendageregion: region number for the appendage
appendagenode: node index for the appendage's division node which must be generated
'''
c=configparser.SafeConfigParser()
assert len(c.read(path))>0
landmarks=ast.literal_eval(c.get(section,'landmarks')) # 0-based node indices
lines=ast.literal_eval(c.get(section,'lines')) # 1-based landmark indices
regions=ast.literal_eval(c.get(section,'regions')) # 1-based landmark indices
stimulus=ast.literal_eval(c.get(section,'stimulus')) # per region
ground=ast.literal_eval(c.get(section,'ground')) # per region
appendageregion=ast.literal_eval(c.get(section,'appendageregion'))
appendagenode=ast.literal_eval(c.get(section,'appendagenode'))
appendagelmindex=ast.literal_eval(c.get(section,'appendagelmindex'))
# types=ast.literal_eval(c.get(section,'type')) # per region
# indices that don't exist are for landmarks that need to be calculated
lmlines=[subone(l) for l in lines ]#if max(l)<=len(landmarks)] # filter for lines with existing node indices
lmregions=[subone(r) for r in regions]
# lmregions=[subone(r) for r in regions if all(i<=len(landmarks) for i in r)]
lmstim=stimulus#[:len(lmregions)]
lmground=ground#[:len(lmregions)]
allregions=[]
for r in lmregions:
lr={i:(a,b) for i,(a,b) in enumerate(lmlines) if a in r and b in r}
if len(lr)>2:
allregions.append(lr)
return landmarks,lmlines,allregions,lmstim,lmground, appendageregion,appendagenode,appendagelmindex
def writeMeshFile(filename,nodes,inds,nodegroup,indgroup,dim):
'''Write a medit format mesh file to `filename'.'''
with open(filename,'w') as o:
print('MeshVersionFormatted 1',file=o)
print('Dimension %i'%dim,file=o)
print('Vertices',file=o)
print(len(nodes),file=o)
for n in range(len(nodes)):
for v in tuple(nodes[n])[:dim]:
print('%20.10f'%v,end=' ',file=o)
group=0 if nodegroup is None else nodegroup[n]
print(group,file=o)
print('Triangles' if len(inds[0])==3 else 'Tetrahedra',file=o)
print(len(inds),file=o)
for n in range(len(inds)):
print(*['%10i'%(t+1) for t in inds[n]],file=o,end=' ')
group=0 if indgroup is None else indgroup[n]
print(group,file=o)
def createNodeQuery(nodes):
'''
Create a cKDTree object from `nodes' and return a query function which accepts a position and radius value. The
function will return the nearest point index to the given position if radius<=0 and a list of indices of points
within the given radius of the position otherwise. The node list is also returned as a second return value.
'''
tree=cKDTree(np.asarray(list(map(tuple,nodes))))
def _query(pos,radius=0):
'''Query `nodes' for the nearest node to `pos' if `radius'<=0 or a list of those within `radius' otherwise.'''
pos=tuple(pos)
if radius<=0:
return tree.query(pos)[1],tree.data
else:
return tree.query_ball_point(pos,radius),tree.data
return _query
def registerSubjectToTarget(subjectObj,targetObj,targetTrans,outdir,decimpath,VTK):
'''
Register the `subjectObj' mesh object to `targetObj' mesh object putting data into directory `outdir'. The subject
will be decimated to have roughly the same number of nodes as the target and then stored as subject.vtk in `outdir'.
Registration is done with Deformetrica and result stored as 'Registration_subject_to_subject_0__t_9.vtk' in `outdir'.
If `targetTrans' must be None or a transform which is applied to the nodes of `targetObj' before registration.
'''
dpath=os.path.join(outdir,decimatedFile)
tmpfile=os.path.join(outdir,'tmp.vtk')
# if a transform is given, apply that transform to the target mesh when saving otherwise do no transformation
vecfunc=(lambda i: tuple(targetTrans*i)) if targetTrans else None
shutil.copy(os.path.join(deformDir,datasetFile),os.path.join(outdir,datasetFile)) # copy dataset file unchanged
model=open(os.path.join(deformDir,modelFile)).read()
model=model.replace('%1',str(dataSigma))
model=model.replace('%2',str(kernelWidthSub))
model=model.replace('%3',str(kernelType))
model=model.replace('%4',str(kernelWidthDef))
with open(os.path.join(outdir,modelFile),'w') as o: # save modified model file
o.write(model)
optim=open(os.path.join(deformDir,optimFile)).read()
optim=optim.replace('%1',str(stepSize))
with open(os.path.join(outdir,optimFile),'w') as o: # save modified optimization file
o.write(optim)
VTK.saveLegacyFile(tmpfile,subjectObj,datasettype='POLYDATA')
VTK.saveLegacyFile(os.path.join(outdir,targetFile),targetObj,datasettype='POLYDATA',vecfunc=vecfunc)
snodes=subjectObj.datasets[0].getNodes()
tnodes=targetObj.datasets[0].getNodes()
sizeratio=float(tnodes.n())/snodes.n()
sizepercent=str(100*(1-sizeratio))[:6] # percent to decimate by
# decimate the mesh most of the way towards having the same number of nodes as the target
ret,output=eidolon.execBatchProgram(decimpath,tmpfile,dpath,'-reduceby',sizepercent,'-ascii',logcmd=True)
assert ret==0,output
env={'LD_LIBRARY_PATH':deformDir}
ret,output=eidolon.execBatchProgram(deformExe,"registration", "3D", modelFile, datasetFile, optimFile, "--output-dir=.",cwd=outdir,env=env,logcmd=True)
assert ret==0,output
return output
def transferLandmarks(archFilename,fieldname,targetObj,targetTrans,subjectObj,outdir,VTK):
'''
Register the landmarks defined as node indices on `targetObj' to equivalent node indices on `subjectObj' via the
decimated and registered intermediary stored in `outdir'. The result is a list of index pairs associating a node
index in `subjectObj' for every landmark index in `targetObj'.
'''
decimated=os.path.join(outdir,decimatedFile)
registered=os.path.join(outdir,registeredFile)
arch=loadArchitecture(archFilename,fieldname)
lmarks,lines=arch[:2]
appendagelmindex=arch[-1]
# append the index for the estimated appendage node, this will have to be adjusted manually after registration
lmarks.append(appendagelmindex)
reg=VTK.loadObject(registered) # mesh registered to target
dec=VTK.loadObject(decimated) # decimated unregistered mesh
tnodes=targetObj.datasets[0].getNodes() # target points
rnodes=reg.datasets[0].getNodes() # registered decimated points
dnodes=dec.datasets[0].getNodes() # unregistered decimated points
snodes=subjectObj.datasets[0].getNodes() # original subject points
targetTrans=targetTrans or eidolon.transform()
lmpoints=[(targetTrans*tnodes[m],m) for m in lmarks] # (transformed landmark node, index) pairs
# find the points in the registered mesh closest to the landmark points in the target object
query=createNodeQuery(rnodes)
rpoints=[(query(pt)[0],m) for pt,m in lmpoints]
# find the subject nodes closes to landmark points in the decimated mesh (which are at the same indices as in the registered mesh)
query=createNodeQuery(snodes)
spoints=[(query(dnodes[i])[0],m) for i,m in rpoints]
assert len(spoints)==len(lmpoints)
assert all(p[0] is not None for p in spoints)
slines=[l for l in lines if max(l)<len(spoints)]
return spoints,slines # return list (i,m) pairs where node index i in the subject mesh is landmark m
def generateTriAdj(tris):
'''
Generates a table (n,3) giving the indices of adjacent triangles for each triangle, with a value of `n' indicating a
free edge. The indices in each row are in sorted order rather than per triangle edge. The result is the dual of the
triangle mesh represented as the (n,3) array and a map relating the mesh's ragged edges to their triangle.
'''
edgemap = {} # maps edges to the first triangle having that edge
result=IndexMatrix(tris.getName()+'Adj',tris.n(),3)
result.fill(tris.n())
# Find adjacent triangles by constructing a map from edges defined by points (a,b) to the triangle having that edge,
# when that edge is encountered twice then the current triangle is adjacent to the one that originally added the edge.
for t1,tri in enumerate(tris): # iterate over each triangle t1
for a,b in successive(tri,2,True): # iterate over each edge (a,b) of t1
k=(min(a,b),max(a,b)) # key has uniform edge order
t2=edgemap.pop(k,None) # attempt to find edge k in the map, None indicates edge not found
if t2 is not None: # an edge is shared if already encountered, thus t1 is adjacent to t2
result[t1]=sorted(set(result[t1]+(t2,)))
result[t2]=sorted(set(result[t2]+(t1,)))
else:
edgemap[k]=t1 # first time edge is encountered, associate this triangle with it
return result,edgemap
@timing
def getAdjTo(adj,start,end):
'''
Returns a subgraph of `adj',represented as a node->[neighbours] dict, which includes nodes `start' and `end'.
If `end' is None or an index not appearing in the mesh, the result will be the submesh contiguous with `start'.
'''
visiting=set([start])
found={}
numnodes=adj.n()
while visiting and end not in found:
visit=visiting.pop()
neighbours=[n for n in adj[visit] if n<numnodes]
found[visit]=neighbours
visiting.update(n for n in neighbours if n not in found)
return found
def generateNodeElemMap(numnodes,tris):
'''Returns a list relating each node index to the set of element indices using that node.'''
nodemap=[set() for _ in range(numnodes)]
for i,tri in enumerate(tris):
for n in tri:
nodemap[n].add(i)
#assert all(len(s)>0 for s in nodemap), 'Unused nodes in triangle topology'
return nodemap
def generateSimplexEdgeMap(numnodes,simplices):
'''
Returns a list relating each node index to the set of node indices joined to it by graph edges. This assumes the mesh
has `numnodes' number of nodes and simplex topology `simplices'.
'''
nodemap=[set() for _ in range(numnodes)]
for simplex in simplices:
simplex=set(simplex)
for s in simplex:
nodemap[s].update(simplex.difference((s,)))
return nodemap
@timing
def dijkstra(adj, start, end,distFunc,acceptTri=None):
#http://benalexkeen.com/implementing-djikstras-shortest-path-algorithm-with-python/
# shortest paths is a dict of nodes to previous node and distance
paths = {start: (None,0)}
curnode = start
visited = set()
# consider only subgraph containing start and end, this expands geometrically so should contain the minimal path
adj=getAdjTo(adj,start,end)
eidolon.printFlush(len(adj))
if acceptTri is not None:
accept=lambda a: (a in adj and acceptTri(a))
else:
accept=lambda a: a in adj
while curnode != end:
visited.add(curnode)
destinations = list(filter(accept,adj[curnode]))
curweight = paths[curnode][1]
for dest in destinations:
weight = curweight+distFunc(curnode,dest)
if dest not in paths or weight < paths[dest][1]:
paths[dest] = (curnode, weight)
nextnodes = {node: paths[node] for node in paths if node not in visited}
if not nextnodes:
raise ValueError("Route %i -> %i not possible"%(start,end))
# next node is the destination with the lowest weight
curnode = min(nextnodes, key=lambda k:nextnodes[k][1])
# collect path from end node back to the start
path = []
while curnode is not None:
path.insert(0,curnode)
curnode = paths[curnode][0]
return path
def subone(v):
return tuple(i-1 for i in v)
def findNearestIndex(pt,nodelist):
return min(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def findFarthestIndex(pt,nodelist):
return max(range(len(nodelist)),key=lambda i:pt.distToSq(nodelist[i]))
def getContiguousTris(graph,starttri,acceptTri):
accepted=[starttri]
adjacent=first(i for i in graph.getSharedNodeTris(starttri) if i not in accepted and acceptTri(i))
while adjacent is not None:
accepted.append(adjacent)
for a in accepted[::-1]:
allneighbours=graph.getSharedNodeTris(a)
adjacent=first(i for i in allneighbours if i not in accepted and acceptTri(i))
if adjacent:
break
return accepted
@timing
def findTrisBetweenNodes(start,end,landmarks,graph):
eidolon.printFlush('Nodes:',start,end)
start=landmarks[start]
end=landmarks[end]
assert 0<=start<len(graph.nodeelem)
assert 0<=end<len(graph.nodeelem)
starttri=first(graph.nodeelem[start])
endtri=first(graph.nodeelem[end])
assert starttri is not None
assert endtri is not None
nodes=graph.nodes
startnode=nodes[start]
endnode=nodes[end]
easypath= graph.getPath(starttri,endtri)
midnode=graph.tricenters[easypath[len(easypath)//2]]
# define planes to bound the areas to search for triangles to within the space of the line
splane=plane(startnode,midnode-startnode)
eplane=plane(endnode,midnode-endnode)
# adjust the plane's positions to account for numeric error
adjustdist=1e1
splane.moveUp(-adjustdist)
eplane.moveUp(-adjustdist)
assert starttri is not None
assert endtri is not None
# TODO: plane normal determination still needs work
#linenorm=midnode.planeNorm(startnode,endnode)
#linenorm=graph.getTriNorm(easypath[len(easypath)//2]).cross(midnode-startnode)
linenorm=eidolon.avg(graph.getTriNorm(e) for e in easypath).cross(midnode-startnode)
lineplane=plane(splane.center,linenorm)
indices=set([starttri,endtri]) # list of element indices on lineplane between splane and eplane
for i in range(graph.tris.n()):
trinodes=graph.getTriNodes(i)
numabove=lineplane.numPointsAbove(trinodes)
if numabove in (1,2) and splane.between(trinodes,eplane):
indices.add(i)
accepted=getContiguousTris(graph,starttri,lambda i:i in indices)
if endtri not in accepted or len(easypath)<len(accepted):
eidolon.printFlush('---Resorting to easypath')
accepted=easypath
return accepted
@timing
def assignRegion(region,index,assignmat,landmarks,linemap,graph):
def getEnclosedGraph(adj,excludes,start):
visiting=set([start])
found=set()
numnodes=adj.n()
assert start is not None
while visiting:
visit=visiting.pop()
neighbours=[n for n in adj.getRow(visit) if n<numnodes and n not in excludes]
found.add(visit)
visiting.update(n for n in neighbours if n not in found)
return found
# collect all tri indices on the border of this region
bordertris=set()
for lineindex,(a,b) in region.items():
if (a,b) in linemap:
line=linemap[(a,b)]
else:
line=findTrisBetweenNodes(a,b,landmarks,graph)
linemap[(a,b)]=line
linemap[(b,a)]=line
# assign line ID to triangles on the line
for tri in line:
assignmat[tri,0]=lineindex
bordertris.update(line)
bordertri=graph.tricenters[first(bordertris)]
farthest=max(range(len(graph.tris)),key=lambda i:graph.tricenters[i].distToSq(bordertri))
maxgraph=getEnclosedGraph(graph.adj,bordertris,farthest)
for tri in range(len(graph.tris)):
if tri in bordertris or tri not in maxgraph:
if assignmat[tri,1]<0:
assignmat[tri,1]=index
elif assignmat[tri,2]<0:
assignmat[tri,2]=index
elif assignmat[tri,3]<0:
assignmat[tri,3]=index
@timing
def generateRegionField(obj,landmarkObj,regions,appendageregion,appendagenode,task=None):
ds=obj.datasets[0]
nodes=ds.getNodes()
tris=first(ind for ind in ds.enumIndexSets() if ind.m()==3 and bool(ind.meta(StdProps._isspatial)))
lmnodes=landmarkObj.datasets[0].getNodes()
linemap={}
landmarks={i:nodes.indexOf(lm)[0] for i,lm in enumerate(lmnodes)}
assert all(0<=l<nodes.n() for l in landmarks)
graph=TriMeshGraph(nodes,tris)
edgenodeinds=set(eidolon.listSum(graph.ragged)) # list of all node indices on the ragged edge
filledregions=RealMatrix(fieldNames._regions,tris.n(),4)
filledregions.meta(StdProps._elemdata,'True')
filledregions.fill(-10)
#landmarks[appendagenode]=0 # TODO: skipping appendage node for now
for region in regions:
for a,b in region.values():
if appendagenode not in (a,b):
if a in landmarks and b not in landmarks:
oldlmnode=nodes[landmarks[a]]
newlm=b
elif b in landmarks and a not in landmarks:
oldlmnode=nodes[landmarks[b]]
newlm=a
else:
continue
newlmnode=min(edgenodeinds,key=lambda i:nodes[i].distToSq(oldlmnode)) # ragged edge node closest to landmark
landmarks[newlm]=newlmnode
# eidolon.printFlush(newlm,newlmnode,graph.getPath(min(a,b),newlmnode),'\n')
# line=findTrisBetweenNodes(a,b,landmarks,graph)
# for tri in line:
# filledregions[tri,0]=max(a,b)
if task:
task.setMaxProgress(len(regions))
for rindex,region in enumerate(regions):
eidolon.printFlush('Region',rindex,'of',len(regions),region)
allnodes=set(eidolon.listSum(region.values()))
if all(a in landmarks for a in allnodes):
assignRegion(region,rindex,filledregions,landmarks,linemap,graph)
else:
eidolon.printFlush('Skipping',rindex,[a for a in allnodes if a not in landmarks])
if task:
task.setProgress(rindex+1)
return filledregions,linemap
def extractTriRegion(nodes,tris,acceptFunc):
'''
Extract the region from the mesh (nodes,tris) as defined by the triangle acceptance function `acceptFunc'. The return
value is a tuple containing the list of new nodes, a list of new tris, a map from old node indices in `nodes' to new
indices in the returned node list, and a map from triangle indices in `tris' to new ones in the returned triangle list.
'''
#old -> new
newnodes=[] # new node set
newtris=[] # new triangle set
nodemap={} # maps old node indices to new
trimap={} # maps old triangle indices to new
for tri in range(len(tris)):
if acceptFunc(tri):
newtri=list(tris[tri])
for i,n in enumerate(newtri):
if n not in nodemap:
nodemap[n]=len(newnodes)
newnodes.append(nodes[n])
newtri[i]=nodemap[n]
trimap[tri]=len(newtris)
newtris.append(newtri)
return newnodes,newtris,nodemap,trimap
def calculateMeshGradient(prefix,nodes,elems,groups,VTK):
'''Calculate the laplace gradient for the mesh given as (nodes,elems,groups) using sfepy.'''
tempdir=os.path.dirname(prefix)
infile=prefix+'.mesh'
logfile=prefix+'.log'
outfile=prefix+'.vtk'
probfile=prefix+'.py'
writeMeshFile(infile,nodes,elems,groups,None,3)
with open(problemFile) as p:
with open(probfile,'w') as o:
o.write(p.read()%{'inputfile':infile,'outdir':tempdir})
p=ProblemConf.from_file(probfile)
output.set_output(logfile,True,True)
| solve_pde(p) | sfepy.applications.solve_pde |
# mixed formulation
# 07.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material - mixed formulation
import numpy as nm
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions, get_box_volume
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
mic_p = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'p', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
out['p_mic'] = Struct( name = 'output_data',
mode = 'cell', data = mic_p[:,nm.newaxis,
:,nm.newaxis],
var_name = 'p', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
add_stress_p( stress_Y, pb, 'i', 'Y', 'p', mic_p )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
dim = 3
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': | stiffness_from_youngpoisson_mixed(dim, 7.0e9, 0.4) | sfepy.mechanics.matcoefs.stiffness_from_youngpoisson_mixed |
# mixed formulation
# 07.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material - mixed formulation
import numpy as nm
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions, get_box_volume
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
mic_p = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'p', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
out['p_mic'] = Struct( name = 'output_data',
mode = 'cell', data = mic_p[:,nm.newaxis,
:,nm.newaxis],
var_name = 'p', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
add_stress_p( stress_Y, pb, 'i', 'Y', 'p', mic_p )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
dim = 3
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': stiffness_from_youngpoisson_mixed(dim, 7.0e9, 0.4),
'Yc': | stiffness_from_youngpoisson_mixed(dim, 70.0e9, 0.2) | sfepy.mechanics.matcoefs.stiffness_from_youngpoisson_mixed |
# mixed formulation
# 07.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material - mixed formulation
import numpy as nm
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson_mixed, bulk_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions, get_box_volume
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part, add_stress_p
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
mic_p = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'p', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
out['p_mic'] = Struct( name = 'output_data',
mode = 'cell', data = mic_p[:,nm.newaxis,
:,nm.newaxis],
var_name = 'p', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
add_stress_p( stress_Y, pb, 'i', 'Y', 'p', mic_p )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
dim = 3
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': stiffness_from_youngpoisson_mixed(dim, 7.0e9, 0.4),
'Yc': stiffness_from_youngpoisson_mixed(dim, 70.0e9, 0.2)},
'gamma': {'Ym': 1.0/ | bulk_from_youngpoisson(7.0e9, 0.4) | sfepy.mechanics.matcoefs.bulk_from_youngpoisson |
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run calculation of homogeized coefficients:
#
# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import coor_to_sym, define_box_regions
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
import sfepy.homogenization.coefs_base as cb
from sfepy.base.base import Struct
data_dir = 'example_poropiezo-1'
def data_to_struct(data):
out = {}
for k, v in data.items():
out[k] = Struct(name='output_data',
mode='cell' if v[2] == 'c' else 'vertex',
data=v[0],
var_name=v[1],
dofs=None)
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# reconstruct displacement, and electric fields at the microscopic level,
# see Section 6.2
def recovery_micro_dfc(pb, corrs, macro):
eps0 = macro['eps0']
mesh = pb.domain.mesh
regions = pb.domain.regions
dim = mesh.dim
Yms_map = regions['Yms'].get_entities(0)
Ym_map = regions['Ym'].get_entities(0)
gl = '_' + list(corrs.keys())[0].split('_')[-1]
u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]
phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]
for ii in range(2):
u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]
phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
phi += corrs['corrs_rs' + gl]['r_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Ym_map, kk], axis=1)
u1 += corrs['corrs_rs' + gl]['u_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Yms_map, kk], axis=1)
u = macro['u'][Yms_map, :] + eps0 * u1
mvar = pb.create_variables(['u', 'r', 'svar'])
e_mac_Yms = [None] * macro['strain'].shape[1]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
mvar['svar'].set_data(macro['strain'][:, kk])
mac_e_Yms = pb.evaluate('ev_volume_integrate.i2.Yms(svar)',
mode='el_avg',
var_dict={'svar': mvar['svar']})
e_mac_Yms[kk] = mac_e_Yms.squeeze()
e_mac_Yms = nm.vstack(e_mac_Yms).T[:, nm.newaxis, :, nm.newaxis]
mvar['r'].set_data(phi)
E_mic = pb.evaluate('ev_grad.i2.Ym(r)',
mode='el_avg',
var_dict={'r': mvar['r']}) / eps0
mvar['u'].set_data(u1)
e_mic = pb.evaluate('ev_cauchy_strain.i2.Yms(u)',
mode='el_avg',
var_dict={'u': mvar['u']})
e_mic += e_mac_Yms
out = {
'u0': (macro['u'][Yms_map, :], 'u', 'p'), # macro displacement
'u1': (u1, 'u', 'p'), # local displacement corrections, see eq. (58)
'u': (u, 'u', 'p'), # total displacement
'e_mic': (e_mic, 'u', 'c'), # micro strain field, see eq. (58)
'phi': (phi, 'r', 'p'), # electric potential, see eq. (57)
'E_mic': (E_mic, 'r', 'c'), # electric field, see eq. (58)
}
return data_to_struct(out)
# define homogenized coefficients and subproblems for correctors
def define(grid0=100, filename_mesh=None):
eps0 = 0.01 / grid0
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'piezo_mesh_micro_dfc.vtk')
mesh = | Mesh.from_file(filename_mesh) | sfepy.discrete.fem.mesh.Mesh.from_file |
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run calculation of homogeized coefficients:
#
# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import coor_to_sym, define_box_regions
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
import sfepy.homogenization.coefs_base as cb
from sfepy.base.base import Struct
data_dir = 'example_poropiezo-1'
def data_to_struct(data):
out = {}
for k, v in data.items():
out[k] = Struct(name='output_data',
mode='cell' if v[2] == 'c' else 'vertex',
data=v[0],
var_name=v[1],
dofs=None)
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# reconstruct displacement, and electric fields at the microscopic level,
# see Section 6.2
def recovery_micro_dfc(pb, corrs, macro):
eps0 = macro['eps0']
mesh = pb.domain.mesh
regions = pb.domain.regions
dim = mesh.dim
Yms_map = regions['Yms'].get_entities(0)
Ym_map = regions['Ym'].get_entities(0)
gl = '_' + list(corrs.keys())[0].split('_')[-1]
u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]
phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]
for ii in range(2):
u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]
phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
phi += corrs['corrs_rs' + gl]['r_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Ym_map, kk], axis=1)
u1 += corrs['corrs_rs' + gl]['u_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Yms_map, kk], axis=1)
u = macro['u'][Yms_map, :] + eps0 * u1
mvar = pb.create_variables(['u', 'r', 'svar'])
e_mac_Yms = [None] * macro['strain'].shape[1]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
mvar['svar'].set_data(macro['strain'][:, kk])
mac_e_Yms = pb.evaluate('ev_volume_integrate.i2.Yms(svar)',
mode='el_avg',
var_dict={'svar': mvar['svar']})
e_mac_Yms[kk] = mac_e_Yms.squeeze()
e_mac_Yms = nm.vstack(e_mac_Yms).T[:, nm.newaxis, :, nm.newaxis]
mvar['r'].set_data(phi)
E_mic = pb.evaluate('ev_grad.i2.Ym(r)',
mode='el_avg',
var_dict={'r': mvar['r']}) / eps0
mvar['u'].set_data(u1)
e_mic = pb.evaluate('ev_cauchy_strain.i2.Yms(u)',
mode='el_avg',
var_dict={'u': mvar['u']})
e_mic += e_mac_Yms
out = {
'u0': (macro['u'][Yms_map, :], 'u', 'p'), # macro displacement
'u1': (u1, 'u', 'p'), # local displacement corrections, see eq. (58)
'u': (u, 'u', 'p'), # total displacement
'e_mic': (e_mic, 'u', 'c'), # micro strain field, see eq. (58)
'phi': (phi, 'r', 'p'), # electric potential, see eq. (57)
'E_mic': (E_mic, 'r', 'c'), # electric field, see eq. (58)
}
return data_to_struct(out)
# define homogenized coefficients and subproblems for correctors
def define(grid0=100, filename_mesh=None):
eps0 = 0.01 / grid0
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'piezo_mesh_micro_dfc.vtk')
mesh = Mesh.from_file(filename_mesh)
n_conduct = len(nm.unique(mesh.cmesh.cell_groups)) - 2
sym_eye = 'nm.array([1,1,0])' if mesh.dim == 2 else\
'nm.array([1,1,1,0,0,0])'
bbox = mesh.get_bounding_box()
regions = | define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-3) | sfepy.homogenization.utils.define_box_regions |
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run calculation of homogeized coefficients:
#
# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import coor_to_sym, define_box_regions
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
import sfepy.homogenization.coefs_base as cb
from sfepy.base.base import Struct
data_dir = 'example_poropiezo-1'
def data_to_struct(data):
out = {}
for k, v in data.items():
out[k] = Struct(name='output_data',
mode='cell' if v[2] == 'c' else 'vertex',
data=v[0],
var_name=v[1],
dofs=None)
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# reconstruct displacement, and electric fields at the microscopic level,
# see Section 6.2
def recovery_micro_dfc(pb, corrs, macro):
eps0 = macro['eps0']
mesh = pb.domain.mesh
regions = pb.domain.regions
dim = mesh.dim
Yms_map = regions['Yms'].get_entities(0)
Ym_map = regions['Ym'].get_entities(0)
gl = '_' + list(corrs.keys())[0].split('_')[-1]
u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]
phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]
for ii in range(2):
u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]
phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]
for ii in range(dim):
for jj in range(dim):
kk = | coor_to_sym(ii, jj, dim) | sfepy.homogenization.utils.coor_to_sym |
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run calculation of homogeized coefficients:
#
# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import coor_to_sym, define_box_regions
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
import sfepy.homogenization.coefs_base as cb
from sfepy.base.base import Struct
data_dir = 'example_poropiezo-1'
def data_to_struct(data):
out = {}
for k, v in data.items():
out[k] = Struct(name='output_data',
mode='cell' if v[2] == 'c' else 'vertex',
data=v[0],
var_name=v[1],
dofs=None)
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# reconstruct displacement, and electric fields at the microscopic level,
# see Section 6.2
def recovery_micro_dfc(pb, corrs, macro):
eps0 = macro['eps0']
mesh = pb.domain.mesh
regions = pb.domain.regions
dim = mesh.dim
Yms_map = regions['Yms'].get_entities(0)
Ym_map = regions['Ym'].get_entities(0)
gl = '_' + list(corrs.keys())[0].split('_')[-1]
u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]
phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]
for ii in range(2):
u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]
phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
phi += corrs['corrs_rs' + gl]['r_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Ym_map, kk], axis=1)
u1 += corrs['corrs_rs' + gl]['u_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Yms_map, kk], axis=1)
u = macro['u'][Yms_map, :] + eps0 * u1
mvar = pb.create_variables(['u', 'r', 'svar'])
e_mac_Yms = [None] * macro['strain'].shape[1]
for ii in range(dim):
for jj in range(dim):
kk = | coor_to_sym(ii, jj, dim) | sfepy.homogenization.utils.coor_to_sym |
# This example implements homogenization of piezoeletric porous media.
# The mathematical model and numerical results are described in:
#
# <NAME>., <NAME>.
# Homogenization of the fluid-saturated piezoelectric porous media.
# International Journal of Solids and Structures
# Volume 147, 15 August 2018, Pages 110-125
# https://doi.org/10.1016/j.ijsolstr.2018.05.017
#
# Run calculation of homogeized coefficients:
#
# ./homogen.py example_poropiezo-1/poropiezo_micro_dfc.py
#
# The results are stored in `example_poropiezo-1/results` directory.
#
import sys
import numpy as nm
import os.path as osp
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import coor_to_sym, define_box_regions
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.mesh import Mesh
import sfepy.homogenization.coefs_base as cb
from sfepy.base.base import Struct
data_dir = 'example_poropiezo-1'
def data_to_struct(data):
out = {}
for k, v in data.items():
out[k] = Struct(name='output_data',
mode='cell' if v[2] == 'c' else 'vertex',
data=v[0],
var_name=v[1],
dofs=None)
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# reconstruct displacement, and electric fields at the microscopic level,
# see Section 6.2
def recovery_micro_dfc(pb, corrs, macro):
eps0 = macro['eps0']
mesh = pb.domain.mesh
regions = pb.domain.regions
dim = mesh.dim
Yms_map = regions['Yms'].get_entities(0)
Ym_map = regions['Ym'].get_entities(0)
gl = '_' + list(corrs.keys())[0].split('_')[-1]
u1 = -corrs['corrs_p' + gl]['u'] * macro['press'][Yms_map, :]
phi = -corrs['corrs_p' + gl]['r'] * macro['press'][Ym_map, :]
for ii in range(2):
u1 += corrs['corrs_k%d' % ii + gl]['u'] * macro['phi'][ii]
phi += corrs['corrs_k%d' % ii + gl]['r'] * macro['phi'][ii]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
phi += corrs['corrs_rs' + gl]['r_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Ym_map, kk], axis=1)
u1 += corrs['corrs_rs' + gl]['u_%d%d' % (ii, jj)]\
* nm.expand_dims(macro['strain'][Yms_map, kk], axis=1)
u = macro['u'][Yms_map, :] + eps0 * u1
mvar = pb.create_variables(['u', 'r', 'svar'])
e_mac_Yms = [None] * macro['strain'].shape[1]
for ii in range(dim):
for jj in range(dim):
kk = coor_to_sym(ii, jj, dim)
mvar['svar'].set_data(macro['strain'][:, kk])
mac_e_Yms = pb.evaluate('ev_volume_integrate.i2.Yms(svar)',
mode='el_avg',
var_dict={'svar': mvar['svar']})
e_mac_Yms[kk] = mac_e_Yms.squeeze()
e_mac_Yms = nm.vstack(e_mac_Yms).T[:, nm.newaxis, :, nm.newaxis]
mvar['r'].set_data(phi)
E_mic = pb.evaluate('ev_grad.i2.Ym(r)',
mode='el_avg',
var_dict={'r': mvar['r']}) / eps0
mvar['u'].set_data(u1)
e_mic = pb.evaluate('ev_cauchy_strain.i2.Yms(u)',
mode='el_avg',
var_dict={'u': mvar['u']})
e_mic += e_mac_Yms
out = {
'u0': (macro['u'][Yms_map, :], 'u', 'p'), # macro displacement
'u1': (u1, 'u', 'p'), # local displacement corrections, see eq. (58)
'u': (u, 'u', 'p'), # total displacement
'e_mic': (e_mic, 'u', 'c'), # micro strain field, see eq. (58)
'phi': (phi, 'r', 'p'), # electric potential, see eq. (57)
'E_mic': (E_mic, 'r', 'c'), # electric field, see eq. (58)
}
return data_to_struct(out)
# define homogenized coefficients and subproblems for correctors
def define(grid0=100, filename_mesh=None):
eps0 = 0.01 / grid0
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'piezo_mesh_micro_dfc.vtk')
mesh = Mesh.from_file(filename_mesh)
n_conduct = len(nm.unique(mesh.cmesh.cell_groups)) - 2
sym_eye = 'nm.array([1,1,0])' if mesh.dim == 2 else\
'nm.array([1,1,1,0,0,0])'
bbox = mesh.get_bounding_box()
regions = define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-3)
regions.update({
'Y': 'all',
# matrix
'Ym': 'cells of group 1',
'Ym_left': ('r.Ym *v r.Left', 'vertex'),
'Ym_right': ('r.Ym *v r.Right', 'vertex'),
'Ym_bottom': ('r.Ym *v r.Bottom', 'vertex'),
'Ym_top': ('r.Ym *v r.Top', 'vertex'),
'Ym_far': ('r.Ym *v r.Far', 'vertex'),
'Ym_near': ('r.Ym *v r.Near', 'vertex'),
'Gamma_mc': ('r.Ym *v r.Yc', 'facet', 'Ym'),
# channel / inclusion
'Yc': 'cells of group 2',
'Yc0': ('r.Yc -v r.Gamma_cm', 'vertex'),
'Gamma_cm': ('r.Ym *v r.Yc', 'facet', 'Yc'),
})
print('number of cnonductors: %d' % n_conduct)
regions.update({
'Yms': ('r.Ym +v r.Ys', 'cell'),
'Yms_left': ('r.Yms *v r.Left', 'vertex'),
'Yms_right': ('r.Yms *v r.Right', 'vertex'),
'Yms_bottom': ('r.Yms *v r.Bottom', 'vertex'),
'Yms_top': ('r.Yms *v r.Top', 'vertex'),
'Yms_far': ('r.Yms *v r.Far', 'vertex'),
'Yms_near': ('r.Yms *v r.Near', 'vertex'),
'Gamma_ms': ('r.Ym *v r.Ys', 'facet', 'Ym'),
'Gamma_msc': ('r.Yms *v r.Yc', 'facet', 'Yms'),
'Ys': (' +v '.join(['r.Ys%d' % k for k in range(n_conduct)]),
'cell'),
})
options = {
'coefs_filename': 'coefs_poropiezo_%d' % (grid0),
'volume': {
'variables': ['svar'],
'expression': 'd_volume.i2.Y(svar)',
},
'coefs': 'coefs',
'requirements': 'requirements',
'output_dir': osp.join(data_dir, 'results'),
'ls': 'ls',
'file_per_var': True,
'absolute_mesh_path': True,
'multiprocessing': False,
'recovery_hook': recovery_micro_dfc,
}
fields = {
'displacement': ('real', 'vector', 'Yms', 1),
'potential': ('real', 'scalar', 'Ym', 1),
'sfield': ('real', 'scalar', 'Y', 1),
}
variables = {
# displacement
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
'Pi_u': ('parameter field', 'displacement', 'u'),
'U1': ('parameter field', 'displacement', '(set-to-None)'),
'U2': ('parameter field', 'displacement', '(set-to-None)'),
# potential
'r': ('unknown field', 'potential'),
's': ('test field', 'potential', 'r'),
'Pi_r': ('parameter field', 'potential', 'r'),
'R1': ('parameter field', 'potential', '(set-to-None)'),
'R2': ('parameter field', 'potential', '(set-to-None)'),
# aux variable
'svar': ('parameter field', 'sfield', '(set-to-None)'),
}
epbcs, periodic = get_periodic_bc([('u', 'Yms'), ('r', 'Ym')])
mat_g_sc, mat_d_sc = eps0, eps0**2
# BaTiO3 - Miara, Rohan, ... doi: 10.1016/j.jmps.2005.05.006
materials = {
'matrix': ({
'D': {'Ym': nm.array([[1.504, 0.656, 0.659, 0, 0, 0],
[0.656, 1.504, 0.659, 0, 0, 0],
[0.659, 0.659, 1.455, 0, 0, 0],
[0, 0, 0, 0.424, 0, 0],
[0, 0, 0, 0, 0.439, 0],
[0, 0, 0, 0, 0, 0.439]]) * 1e11, }
},),
'piezo': ({
'g': nm.array([[0, 0, 0, 0, 11.404, 0],
[0, 0, 0, 0, 0, 11.404],
[-4.322, -4.322, 17.360, 0, 0, 0]]) / mat_g_sc,
'd': nm.array([[1.284, 0, 0],
[0, 1.284, 0],
[0, 0, 1.505]]) * 1e-8 / mat_d_sc,
},),
'fluid': ({'gamma': 1.0 / 2.15e9},),
}
functions = {
'match_x_plane': (per.match_x_plane,),
'match_y_plane': (per.match_y_plane,),
'match_z_plane': (per.match_z_plane,),
}
ebcs = {
'fixed_u': ('Corners', {'u.all': 0.0}),
'fixed_r': ('Gamma_ms', {'r.all': 0.0}),
}
integrals = {
'i2': 2,
'i5': 5,
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'ns_em6': ('nls.newton', {
'i_max': 1,
'eps_a': 1e-6,
'eps_r': 1e-6,
'problem': 'nonlinear'}),
'ns_em3': ('nls.newton', {
'i_max': 1,
'eps_a': 1e-3,
'eps_r': 1e-6,
'problem': 'nonlinear'}),
}
coefs = {
# homogenized elasticity, see eq. (46)_1
'A': {
'requires': ['c.A1', 'c.A2'],
'expression': 'c.A1 + c.A2',
'class': cb.CoefEval,
},
'A1': {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_rs'],
'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',
'set_variables': [('U1', ('corrs_rs', 'pis_u'), 'u'),
('U2', ('corrs_rs', 'pis_u'), 'u')],
'class': cb.CoefSymSym,
},
'A2': {
'status': 'auxiliary',
'requires': ['corrs_rs'],
'expression': 'dw_diffusion.i2.Ym(piezo.d, R1, R2)',
'set_variables': [('R1', 'corrs_rs', 'r'),
('R2', 'corrs_rs', 'r')],
'class': cb.CoefSymSym,
},
# homogenized Biot coefficient, see eq. (46)_2
'B': {
'requires': ['c.Phi', 'c.B1', 'c.B2'],
'expression': 'c.B1 - c.B2 + c.Phi * %s' % sym_eye,
'class': cb.CoefEval,
},
'B1': {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_p'],
'expression': 'dw_lin_elastic.i2.Yms(matrix.D, U1, U2)',
'set_variables': [('U1', 'corrs_p', 'u'),
('U2', 'pis_u', 'u')],
'class': cb.CoefSym,
},
'B2': {
'status': 'auxiliary',
'requires': ['pis_u', 'corrs_p'],
'expression': 'dw_piezo_coupling.i2.Ym(piezo.g, U1, R1)',
'set_variables': [('R1', 'corrs_p', 'r'),
('U1', 'pis_u', 'u')],
'class': cb.CoefSym,
},
# homogenized compressibility coefficient, see eq. (46)_6
'M': {
'requires': ['c.Phi', 'c.N'],
'expression': 'c.N + c.Phi * %e' % materials['fluid'][0]['gamma'],
'class': cb.CoefEval,
},
'N': {
'status': 'auxiliary',
'requires': ['corrs_p'],
'expression': 'dw_surface_ltr.i2.Gamma_msc(U1)',
'set_variables': [('U1', 'corrs_p', 'u')],
'class': cb.CoefOne,
},
'Phi': {
'requires': ['c.vol'],
'expression': 'c.vol["fraction_Yc"]',
'class': cb.CoefEval,
},
# volume fractions of Ym, Yc, Ys1, Ys2, ...
'vol': {
'regions': ['Ym', 'Yc'] + ['Ys%d' % k for k in range(n_conduct)],
'expression': 'd_volume.i2.%s(svar)',
'class': cb.VolumeFractions,
},
'eps0': {
'requires': [],
'expression': '%e' % eps0,
'class': cb.CoefEval,
},
'filenames': {},
}
requirements = {
'pis_u': {
'variables': ['u'],
'class': cb.ShapeDimDim,
},
'pis_r': {
'variables': ['r'],
'class': cb.ShapeDim,
},
# local subproblem defined by eq. (41)
'corrs_rs': {
'requires': ['pis_u'],
'ebcs': ['fixed_u', 'fixed_r'],
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= - dw_lin_elastic.i2.Yms(matrix.D, v, Pi_u)""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
= dw_piezo_coupling.i2.Ym(piezo.g, Pi_u, s)""",
},
'set_variables': [('Pi_u', 'pis_u', 'u')],
'class': cb.CorrDimDim,
'save_name': 'corrs_rs_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em3'},
},
# local subproblem defined by eq. (42)
'corrs_p': {
'requires': [],
'ebcs': ['fixed_u', 'fixed_r'],
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= dw_surface_ltr.i2.Gamma_msc(v)""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
= 0"""
},
'class': cb.CorrOne,
'save_name': 'corrs_p_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em6'},
},
# local subproblem defined by eq. (43)
'corrs_rho': {
'requires': [],
'ebcs': ['fixed_u', 'fixed_r'],
'epbcs': periodic['per_u'] + periodic['per_r'],
'is_linear': True,
'equations': {
'eq1':
"""dw_lin_elastic.i2.Yms(matrix.D, v, u)
- dw_piezo_coupling.i2.Ym(piezo.g, v, r)
= 0""",
'eq2':
"""
- dw_piezo_coupling.i2.Ym(piezo.g, u, s)
- dw_diffusion.i2.Ym(piezo.d, s, r)
=
- dw_surface_integrate.i2.Gamma_mc(s)"""
},
'class': cb.CorrOne,
'save_name': 'corrs_p_%d' % grid0,
'dump_variables': ['u', 'r'],
'solvers': {'ls': 'ls', 'nls': 'ns_em6'},
},
}
for k in range(n_conduct):
sk = '%d' % k
regions.update({
'Ys' + sk: 'cells of group %d' % (3 + k),
'Gamma_s' + sk: ('r.Ym *v r.Ys' + sk, 'facet', 'Ym'),
})
materials['matrix'][0]['D'].update({
'Ys' + sk: | stiffness_from_youngpoisson(3, 200e9, 0.25) | sfepy.mechanics.matcoefs.stiffness_from_youngpoisson |
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os
import numpy as nm
from sfepy.base.base import Struct
from sfepy.homogenization.coefficients import Coefficients
from sfepy.discrete.fem import Mesh, FEDomain
def coefs2qp(out, coefs, nqp):
others = {}
for k, v in coefs.items():
if type(v) is nm.float64:
v = nm.array(v)
if type(v) is not nm.ndarray:
others[k] = v
continue
if k[0] == 's':
out[k] = nm.tile(v, (nqp, 1, 1))
else:
if not(k in out):
out[k] = nm.tile(v, (nqp, 1, 1))
out.update(others)
return out
def get_homogmat(coors, mode, pb, coefs_filename, omega=None):
if mode == 'qp':
nqp = coors.shape[0]
outdir = pb.conf.options['output_dir']
cfname = os.path.join(outdir, coefs_filename + '.h5')
out = {}
print('>>> coefs from: ', cfname)
coefs_ = Coefficients.from_file_hdf5(cfname).to_dict()
coefs = {}
if 'omega' in coefs_ and omega is not None:
idx = (nm.abs(coefs_['omega'] - omega)).argmin()
rerr = nm.abs(coefs_['omega'][idx] - omega) / omega
if rerr > 1e-3:
raise ValueError('omega: given=%e, found=%e'
% (omega, coefs_['omega'][idx]))
print('found coeficcients for w=%e' % coefs_['omega'][idx])
del(coefs_['omega'])
else:
idx = 4 # magic index?
for k, v in coefs_.items():
if isinstance(v, nm.ndarray) and len(v.shape) == 3:
coefs[k] = v[idx, ...]
else:
coefs[k] = v
coefs2qp(out, coefs, nqp)
transpose = [k for k, v in out.items()
if type(v) == nm.ndarray and (v.shape[-1] > v.shape[-2])]
for k in transpose:
out[k] = out[k].transpose((0, 2, 1))
return out
def read_dict_hdf5(filename, level=0, group=None, fd=None):
import tables as pt
out = {}
if level == 0:
# fd = pt.openFile(filename, mode='r')
fd = pt.open_file(filename, mode='r')
group = fd.root
for name, gr in group._v_groups.items():
name = name.replace('_', '', 1)
out[name] = read_dict_hdf5(filename, level + 1, gr, fd)
for name, data in group._v_leaves.items():
name = name.replace('_', '', 1)
out[name] = data.read()
if level == 0:
fd.close()
return out
def eval_phi(pb, state_p1, state_p2, p_inc):
pvars = pb.create_variables(['P1', 'P2'])
# transmission loss function: log10(|p_in|^2/|p_out|^2)
pvars['P2'].set_data(nm.ones_like(state_p2) * p_inc**2)
phi_In = pb.evaluate('ev_surface_integrate.5.GammaIn(P2)',
P2=pvars['P2'])
pvars['P1'].set_data(state_p1**2)
phi_Out = pb.evaluate('ev_surface_integrate.5.GammaOut(P1)',
P1=pvars['P1'])
return 10.0 * nm.log10(nm.absolute(phi_In) / nm.absolute(phi_Out))
def post_process(out, pb, state, save_var0='p0'):
rmap = {'g01': 0, 'g02': 0, 'g0': 0, 'dp0': 0, 'sp0': 0, 'p0': 0,
'px': 1, 'p1': 1, 'p2': 2}
for k in out.keys():
if 'real_' in k or 'imag_' in k:
newk = k[:4] + '.' + k[5:]
out[newk] = out[k]
del(out[k])
midfn = pb.conf.filename_mesh_plate
fname, _ = os.path.splitext(os.path.basename(midfn))
fname = os.path.join(pb.output_dir, fname + '.h5')
aux = []
for k, v in read_dict_hdf5(fname)['step0'].items():
if ('real' in k) or ('imag' in k):
aux.append(k)
vn = k.strip('_').split('_')
key = '%s.%s' % tuple(vn)
if key not in out:
out[key] = Struct(name=v['name'].decode('ascii'),
mode=v['mode'].decode('ascii'),
dofs=[j.decode('ascii') for j in v['dofs']],
var_name=v['varname'].decode('ascii'),
shape=v['shape'],
data=v['data'],
dname=v['dname'])
if 'imag' in k:
rmap[vn[1]] = 0
absvars = [ii[4:] for ii in out.keys() if ii[0:4] == 'imag']
for ii in absvars:
if type(out['real' + ii]) is dict:
rpart = out.pop('real' + ii)
rdata = rpart['data']
ipart = out.pop('imag' + ii)
idata = ipart['data']
dim = rdata.shape[1]
varname = save_var0
if dim > 1:
aux = nm.zeros((rdata.shape[0], 1), dtype=nm.float64)
data = rdata if dim < 2 else nm.hstack((rdata, aux))
out['real' + ii] = Struct(name=rpart['name'],
mode=rpart['mode'],
dofs=rpart['dofs'],
var_name=varname,
data=data.copy())
data = idata if dim < 2 else nm.hstack((idata, aux))
out['imag' + ii] = Struct(name=ipart['name'],
mode=ipart['mode'],
dofs=ipart['dofs'],
var_name=varname,
data=data.copy())
else:
rpart = out['real' + ii].__dict__
rdata = rpart['data']
ipart = out['imag' + ii].__dict__
idata = ipart['data']
varname = rpart['var_name']
absval = nm.absolute(rdata + 1j*idata)
if rdata.shape[1] > 1:
aux = nm.zeros((rpart['data'].shape[0], 1), dtype=nm.float64)
absval = nm.hstack((absval, aux))
out[ii[1:]] = Struct(name=rpart['name'],
mode=rpart['mode'],
dofs=rpart['dofs'],
var_name=varname,
data=absval.copy())
# all plate variables as save_var0
for k in out.keys():
k0 = k.replace('imag.', '').replace('real.', '')
if rmap[k0] == 0:
out[k].var_name = save_var0
return out
def get_region_entities(rvar, noff=0):
reg = rvar.field.region
mesh = reg.domain.mesh
rnodes = reg.entities[0]
coors = mesh.coors
ngrp = mesh.cmesh.vertex_groups.squeeze()
descs = mesh.descs[0]
rcells = reg.entities[-1]
rconn = mesh.get_conn(descs)[rcells]
mat_ids = mesh.cmesh.cell_groups[rcells]
remap = -nm.ones((nm.max(rnodes) + 1,), dtype=nm.int64)
remap[rnodes] = nm.arange(rnodes.shape[0]) + noff
rconn = remap[rconn]
nmap = nm.where(remap >= 0)[0]
return coors[rnodes, :], ngrp[rnodes], rconn, mat_ids, descs, nmap
def generate_plate_mesh(fname):
dim_tab = {'3_4': '2_3', '3_8': '2_4'}
mesh3d = | Mesh.from_file(fname) | sfepy.discrete.fem.Mesh.from_file |
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os
import numpy as nm
from sfepy.base.base import Struct
from sfepy.homogenization.coefficients import Coefficients
from sfepy.discrete.fem import Mesh, FEDomain
def coefs2qp(out, coefs, nqp):
others = {}
for k, v in coefs.items():
if type(v) is nm.float64:
v = nm.array(v)
if type(v) is not nm.ndarray:
others[k] = v
continue
if k[0] == 's':
out[k] = nm.tile(v, (nqp, 1, 1))
else:
if not(k in out):
out[k] = nm.tile(v, (nqp, 1, 1))
out.update(others)
return out
def get_homogmat(coors, mode, pb, coefs_filename, omega=None):
if mode == 'qp':
nqp = coors.shape[0]
outdir = pb.conf.options['output_dir']
cfname = os.path.join(outdir, coefs_filename + '.h5')
out = {}
print('>>> coefs from: ', cfname)
coefs_ = Coefficients.from_file_hdf5(cfname).to_dict()
coefs = {}
if 'omega' in coefs_ and omega is not None:
idx = (nm.abs(coefs_['omega'] - omega)).argmin()
rerr = nm.abs(coefs_['omega'][idx] - omega) / omega
if rerr > 1e-3:
raise ValueError('omega: given=%e, found=%e'
% (omega, coefs_['omega'][idx]))
print('found coeficcients for w=%e' % coefs_['omega'][idx])
del(coefs_['omega'])
else:
idx = 4 # magic index?
for k, v in coefs_.items():
if isinstance(v, nm.ndarray) and len(v.shape) == 3:
coefs[k] = v[idx, ...]
else:
coefs[k] = v
coefs2qp(out, coefs, nqp)
transpose = [k for k, v in out.items()
if type(v) == nm.ndarray and (v.shape[-1] > v.shape[-2])]
for k in transpose:
out[k] = out[k].transpose((0, 2, 1))
return out
def read_dict_hdf5(filename, level=0, group=None, fd=None):
import tables as pt
out = {}
if level == 0:
# fd = pt.openFile(filename, mode='r')
fd = pt.open_file(filename, mode='r')
group = fd.root
for name, gr in group._v_groups.items():
name = name.replace('_', '', 1)
out[name] = read_dict_hdf5(filename, level + 1, gr, fd)
for name, data in group._v_leaves.items():
name = name.replace('_', '', 1)
out[name] = data.read()
if level == 0:
fd.close()
return out
def eval_phi(pb, state_p1, state_p2, p_inc):
pvars = pb.create_variables(['P1', 'P2'])
# transmission loss function: log10(|p_in|^2/|p_out|^2)
pvars['P2'].set_data(nm.ones_like(state_p2) * p_inc**2)
phi_In = pb.evaluate('ev_surface_integrate.5.GammaIn(P2)',
P2=pvars['P2'])
pvars['P1'].set_data(state_p1**2)
phi_Out = pb.evaluate('ev_surface_integrate.5.GammaOut(P1)',
P1=pvars['P1'])
return 10.0 * nm.log10(nm.absolute(phi_In) / nm.absolute(phi_Out))
def post_process(out, pb, state, save_var0='p0'):
rmap = {'g01': 0, 'g02': 0, 'g0': 0, 'dp0': 0, 'sp0': 0, 'p0': 0,
'px': 1, 'p1': 1, 'p2': 2}
for k in out.keys():
if 'real_' in k or 'imag_' in k:
newk = k[:4] + '.' + k[5:]
out[newk] = out[k]
del(out[k])
midfn = pb.conf.filename_mesh_plate
fname, _ = os.path.splitext(os.path.basename(midfn))
fname = os.path.join(pb.output_dir, fname + '.h5')
aux = []
for k, v in read_dict_hdf5(fname)['step0'].items():
if ('real' in k) or ('imag' in k):
aux.append(k)
vn = k.strip('_').split('_')
key = '%s.%s' % tuple(vn)
if key not in out:
out[key] = Struct(name=v['name'].decode('ascii'),
mode=v['mode'].decode('ascii'),
dofs=[j.decode('ascii') for j in v['dofs']],
var_name=v['varname'].decode('ascii'),
shape=v['shape'],
data=v['data'],
dname=v['dname'])
if 'imag' in k:
rmap[vn[1]] = 0
absvars = [ii[4:] for ii in out.keys() if ii[0:4] == 'imag']
for ii in absvars:
if type(out['real' + ii]) is dict:
rpart = out.pop('real' + ii)
rdata = rpart['data']
ipart = out.pop('imag' + ii)
idata = ipart['data']
dim = rdata.shape[1]
varname = save_var0
if dim > 1:
aux = nm.zeros((rdata.shape[0], 1), dtype=nm.float64)
data = rdata if dim < 2 else nm.hstack((rdata, aux))
out['real' + ii] = Struct(name=rpart['name'],
mode=rpart['mode'],
dofs=rpart['dofs'],
var_name=varname,
data=data.copy())
data = idata if dim < 2 else nm.hstack((idata, aux))
out['imag' + ii] = Struct(name=ipart['name'],
mode=ipart['mode'],
dofs=ipart['dofs'],
var_name=varname,
data=data.copy())
else:
rpart = out['real' + ii].__dict__
rdata = rpart['data']
ipart = out['imag' + ii].__dict__
idata = ipart['data']
varname = rpart['var_name']
absval = nm.absolute(rdata + 1j*idata)
if rdata.shape[1] > 1:
aux = nm.zeros((rpart['data'].shape[0], 1), dtype=nm.float64)
absval = nm.hstack((absval, aux))
out[ii[1:]] = Struct(name=rpart['name'],
mode=rpart['mode'],
dofs=rpart['dofs'],
var_name=varname,
data=absval.copy())
# all plate variables as save_var0
for k in out.keys():
k0 = k.replace('imag.', '').replace('real.', '')
if rmap[k0] == 0:
out[k].var_name = save_var0
return out
def get_region_entities(rvar, noff=0):
reg = rvar.field.region
mesh = reg.domain.mesh
rnodes = reg.entities[0]
coors = mesh.coors
ngrp = mesh.cmesh.vertex_groups.squeeze()
descs = mesh.descs[0]
rcells = reg.entities[-1]
rconn = mesh.get_conn(descs)[rcells]
mat_ids = mesh.cmesh.cell_groups[rcells]
remap = -nm.ones((nm.max(rnodes) + 1,), dtype=nm.int64)
remap[rnodes] = nm.arange(rnodes.shape[0]) + noff
rconn = remap[rconn]
nmap = nm.where(remap >= 0)[0]
return coors[rnodes, :], ngrp[rnodes], rconn, mat_ids, descs, nmap
def generate_plate_mesh(fname):
dim_tab = {'3_4': '2_3', '3_8': '2_4'}
mesh3d = Mesh.from_file(fname)
domain = | FEDomain('domain', mesh3d) | sfepy.discrete.fem.FEDomain |
# Vibroacoustics
#
# E.Rohan, V.Lukeš
# Homogenization of the vibro–acoustic transmission on periodically
# perforated elastic plates with arrays of resonators.
# https://arxiv.org/abs/2104.01367 (arXiv:2104.01367v1)
import os
import numpy as nm
from sfepy.base.base import Struct
from sfepy.homogenization.coefficients import Coefficients
from sfepy.discrete.fem import Mesh, FEDomain
def coefs2qp(out, coefs, nqp):
others = {}
for k, v in coefs.items():
if type(v) is nm.float64:
v = nm.array(v)
if type(v) is not nm.ndarray:
others[k] = v
continue
if k[0] == 's':
out[k] = nm.tile(v, (nqp, 1, 1))
else:
if not(k in out):
out[k] = nm.tile(v, (nqp, 1, 1))
out.update(others)
return out
def get_homogmat(coors, mode, pb, coefs_filename, omega=None):
if mode == 'qp':
nqp = coors.shape[0]
outdir = pb.conf.options['output_dir']
cfname = os.path.join(outdir, coefs_filename + '.h5')
out = {}
print('>>> coefs from: ', cfname)
coefs_ = | Coefficients.from_file_hdf5(cfname) | sfepy.homogenization.coefficients.Coefficients.from_file_hdf5 |
import numpy as nm
from sfepy.base.base import OneTypeList, Container, Struct
class Functions(Container):
"""Container to hold all user-defined functions."""
def from_conf(conf):
objs = | OneTypeList(Function) | sfepy.base.base.OneTypeList |
r"""
Poisson equation.
This example demonstrates parametric study capabilities of Application
classes. In particular (written in the strong form):
.. math::
c \Delta t = f \mbox{ in } \Omega,
t = 2 \mbox{ on } \Gamma_1 \;,
t = -2 \mbox{ on } \Gamma_2 \;,
f = 1 \mbox{ in } \Omega_1 \;,
f = 0 \mbox{ otherwise,}
where :math:`\Omega` is a square domain, :math:`\Omega_1 \in \Omega` is
a circular domain.
Now let's see what happens if :math:`\Omega_1` diameter changes.
Run::
$ ./simple.py <this file>
and then look in 'output/r_omega1' directory, try for example::
$ ./postproc.py output/r_omega1/circles_in_square*.vtk
Remark: this simple case could be achieved also by defining
:math:`\Omega_1` by a time-dependent function and solve the static
problem as a time-dependent problem. However, the approach below is much
more general.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
"""
from __future__ import absolute_import
import os
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
# Mesh.
filename_mesh = data_dir + '/meshes/2d/special/circles_in_square.vtk'
# Options. The value of 'parametric_hook' is the function that does the
# parametric study.
options = {
'nls' : 'newton', # Nonlinear solver
'ls' : 'ls', # Linear solver
'parametric_hook' : 'vary_omega1_size',
'output_dir' : 'output/r_omega1',
}
# Domain and subdomains.
default_diameter = 0.25
regions = {
'Omega' : 'all',
'Gamma_1' : ('vertices in (x < -0.999)', 'facet'),
'Gamma_2' : ('vertices in (x > 0.999)', 'facet'),
'Omega_1' : 'vertices by select_circ',
}
# FE field defines the FE approximation: 2_3_P1 = 2D, P1 on triangles.
field_1 = {
'name' : 'temperature',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
# Unknown and test functions (FE sense).
variables = {
't' : ('unknown field', 'temperature', 0),
's' : ('test field', 'temperature', 't'),
}
# Dirichlet boundary conditions.
ebcs = {
't1' : ('Gamma_1', {'t.0' : 2.0}),
't2' : ('Gamma_2', {'t.0' : -2.0}),
}
# Material coefficient c and source term value f.
material_1 = {
'name' : 'coef',
'values' : {
'val' : 1.0,
}
}
material_2 = {
'name' : 'source',
'values' : {
'val' : 10.0,
}
}
# Numerical quadrature and the equation.
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'Poisson' : """dw_laplace.i.Omega( coef.val, s, t )
= dw_volume_lvf.i.Omega_1( source.val, s )"""
}
# Solvers.
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
}
functions = {
'select_circ': (lambda coors, domain=None:
select_circ(coors[:,0], coors[:,1], 0, default_diameter),),
}
# Functions.
def select_circ( x, y, z, diameter ):
"""Select circular subdomain of a given diameter."""
r = nm.sqrt( x**2 + y**2 )
out = nm.where(r < diameter)[0]
n = out.shape[0]
if n <= 3:
raise ValueError( 'too few vertices selected! (%d)' % n )
return out
def vary_omega1_size( problem ):
"""Vary size of \Omega1. Saves also the regions into options['output_dir'].
Input:
problem: Problem instance
Return:
a generator object:
1. creates new (modified) problem
2. yields the new (modified) problem and output container
3. use the output container for some logging
4. yields None (to signal next iteration to Application)
"""
from sfepy.discrete import Problem
from sfepy.solvers.ts import get_print_info
output.prefix = 'vary_omega1_size:'
diameters = nm.linspace( 0.1, 0.6, 7 ) + 0.001
ofn_trunk, output_format = problem.ofn_trunk, problem.output_format
output_dir = problem.output_dir
join = os.path.join
conf = problem.conf
cf = conf.get_raw( 'functions' )
n_digit, aux, d_format = get_print_info( len( diameters ) + 1 )
for ii, diameter in enumerate( diameters ):
output( 'iteration %d: diameter %3.2f' % (ii, diameter) )
cf['select_circ'] = (lambda coors, domain=None:
select_circ(coors[:,0], coors[:,1], 0, diameter),)
conf.edit('functions', cf)
problem = | Problem.from_conf(conf) | sfepy.discrete.Problem.from_conf |
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = FEDomain(mesh.name, mesh)
for ii in range(3):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
domain.mesh.name = '2_4_2_refined'
return domain.mesh
elif mode == 'write':
pass
def post_process(out, pb, state, extend=False):
"""
Calculate gradient of the solution.
"""
from sfepy.discrete.fem.fields_base import create_expression_output
aux = create_expression_output('ev_grad.ie.Elements( t )',
'grad', 'temperature',
pb.fields, pb.get_materials(),
pb.get_variables(), functions=pb.functions,
mode='qp', verbose=False,
min_level=0, max_level=5, eps=1e-3)
out.update(aux)
return out
def define(order=5, basis='lagrange', min_level=0, max_level=5, eps=1e-3):
filename_mesh = | UserMeshIO(mesh_hook) | sfepy.discrete.fem.meshio.UserMeshIO |
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = FEDomain(mesh.name, mesh)
for ii in range(3):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
domain.mesh.name = '2_4_2_refined'
return domain.mesh
elif mode == 'write':
pass
def post_process(out, pb, state, extend=False):
"""
Calculate gradient of the solution.
"""
from sfepy.discrete.fem.fields_base import create_expression_output
aux = create_expression_output('ev_grad.ie.Elements( t )',
'grad', 'temperature',
pb.fields, pb.get_materials(),
pb.get_variables(), functions=pb.functions,
mode='qp', verbose=False,
min_level=0, max_level=5, eps=1e-3)
out.update(aux)
return out
def define(order=5, basis='lagrange', min_level=0, max_level=5, eps=1e-3):
filename_mesh = UserMeshIO(mesh_hook)
# Get the mesh bounding box.
io = | MeshIO.any_from_filename(base_mesh) | sfepy.discrete.fem.meshio.MeshIO.any_from_filename |
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = | Mesh.from_file(base_mesh) | sfepy.discrete.fem.Mesh.from_file |
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = | FEDomain(mesh.name, mesh) | sfepy.discrete.fem.FEDomain |
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = FEDomain(mesh.name, mesh)
for ii in range(3):
| output('refine %d...' % ii) | sfepy.base.base.output |
"""
Elapsed time measurement utilities.
"""
import time
from sfepy.base.base import Struct
class Timer(Struct):
def __init__(self, name='timer', start=False):
| Struct.__init__(self, name=name) | sfepy.base.base.Struct.__init__ |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = | _to2d(coors) | sfepy.postprocess.plot_dofs._to2d |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = | _get_axes(ax, dim) | sfepy.postprocess.plot_dofs._get_axes |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = | _to2d(coors) | sfepy.postprocess.plot_dofs._to2d |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = | _get_axes(ax, dim) | sfepy.postprocess.plot_dofs._get_axes |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
ax.scatter(*coors.T, s=size, c=color)
return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using global ids.
"""
coors = cmesh.get_centroids(edim)
coors = | _to2d(coors) | sfepy.postprocess.plot_dofs._to2d |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
ax.scatter(*coors.T, s=size, c=color)
return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using global ids.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = | _get_axes(ax, dim) | sfepy.postprocess.plot_dofs._get_axes |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
ax.scatter(*coors.T, s=size, c=color)
return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using global ids.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
for ii, cc in enumerate(coors):
ax.text(*cc.T, s=ii, color=color, fontsize=fontsize)
return ax
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using cell-local ids.
"""
coors = cmesh.get_centroids(edim)
coors = | _to2d(coors) | sfepy.postprocess.plot_dofs._to2d |
"""
Functions to visualize the CMesh geometry and topology.
"""
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
def plot_wireframe(ax, cmesh, color='k'):
"""
Plot a finite element mesh as a wireframe using edges connectivity.
"""
coors = cmesh.coors
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
edges = cmesh.get_conn(1, 0)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
ax.plot(*cc.T, color=color)
return ax
def plot_entities(ax, cmesh, edim, color='b', size=10):
"""
Plot mesh topology entities using scatter plot.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
ax.scatter(*coors.T, s=size, c=color)
return ax
def label_global_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using global ids.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
ax = _get_axes(ax, dim)
for ii, cc in enumerate(coors):
ax.text(*cc.T, s=ii, color=color, fontsize=fontsize)
return ax
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10):
"""
Label mesh topology entities using cell-local ids.
"""
coors = cmesh.get_centroids(edim)
coors = _to2d(coors)
dim = cmesh.dim
centres = cmesh.get_centroids(dim)
cmesh.setup_connectivity(dim, edim)
conn = cmesh.get_conn(dim, edim)
off = conn.offsets
ax = | _get_axes(ax, dim) | sfepy.postprocess.plot_dofs._get_axes |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = | Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh') | sfepy.discrete.fem.Mesh.from_file |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = | FEDomain('domain', mesh) | sfepy.discrete.fem.FEDomain |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = | FieldVariable('u', 'unknown', field) | sfepy.discrete.FieldVariable |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = | FieldVariable('v', 'test', field, primary_var_name='u') | sfepy.discrete.FieldVariable |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = | Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8) | sfepy.discrete.Material |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = | Integral('i', order=3) | sfepy.discrete.Integral |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = | Integral('i', order=2) | sfepy.discrete.Integral |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = | Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u) | sfepy.terms.Term.new |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = | Equation('balance', t1 + t2) | sfepy.discrete.Equation |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = | Equations([eq]) | sfepy.discrete.Equations |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = | ScipyDirect({}) | sfepy.solvers.ls.ScipyDirect |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = | IndexedStruct() | sfepy.base.base.IndexedStruct |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = | Newton({}, lin_solver=ls, status=nls_status) | sfepy.solvers.nls.Newton |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = | Problem('elasticity', equations=eqs) | sfepy.discrete.Problem |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs)
pb.save_regions_as_groups('regions')
pb.set_bcs(ebcs=Conditions([fix_u]))
pb.set_solver(nls)
status = | IndexedStruct() | sfepy.base.base.IndexedStruct |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs)
pb.save_regions_as_groups('regions')
pb.set_bcs(ebcs=Conditions([fix_u]))
pb.set_solver(nls)
status = IndexedStruct()
state = pb.solve(status=status)
print('Nonlinear solver status:\n', nls_status)
print('Stationary solver status:\n', status)
pb.save_state('linear_elasticity.vtk', state)
# if options.show:
view = | Viewer('linear_elasticity.vtk') | sfepy.postprocess.viewer.Viewer |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D= | stiffness_from_lame(dim=3, lam=5.769, mu=3.846) | sfepy.mechanics.matcoefs.stiffness_from_lame |
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.mechanics.matcoefs import stiffness_from_lame
import numpy as np
def shift_u_fun(ts, coors, bc=None, problem=None, shift=0.0):
"""
Define a displacement depending on the y coordinate.
"""
val = shift * coors[:,1]**2
return val
helps = {
'show' : 'show the results figure',
}
# def main():
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
# mesh = Mesh.from_file(data_dir + '/meshes/2d/rectangle_tri.mesh')
mesh = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_hexa.mesh')
domain = FEDomain('domain', mesh)
# min_x, max_x = domain.get_mesh_bounding_box()[:,0]
# eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
# gamma1 = domain.create_region('Gamma1',
# 'vertices in x < %.10f' % (min_x + eps),
# 'facet')
Bottom = domain.create_region('Bottom',
'vertices in z < %.10f' % -0.499,
'facet')
# gamma2 = domain.create_region('Gamma2',
# 'vertices in x > %.10f' % (max_x - eps),
# 'facet')
Top = domain.create_region('Top',
'vertices in z > %.10f' % 0.499,
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=3)
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
# materials = {
# 'solid' : ({
# 'D': stiffness_from_lame(dim=3, lam=5.769, mu=3.846),
# },),
# 'cs' : ({
# 'f' : [1e5, 1e-2],
# '.c' : [0.0, 0.0, 1.2],
# '.r' : 0.8,
# },),
# }
# defK = materials['cs'][0]
# cs = ContactSphere(csc['.c'], csc['.r'])
m = Material('m', D=stiffness_from_lame(dim=3, lam=5.769, mu=3.846))
# f = Material('f', val=[[0.02], [0.01]])
# csf = Material('csf', val=[1e5, 1e-2])
# csc = Material('csc', val=[0.0, 0.0, 1.2])
# csr = Material('csr', val=0.8)
cs = Material('cs',f=[1e5, 1e-2],c=[0.0, 0.0, 1.2],r=0.8)
integral = Integral('i', order=3)
integral1 = Integral('i', order=2)
t1 = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
t2 = Term.new('dw_contact_sphere(cs.f, cs.c, cs.r, v, u)', integral1, Top, cs=cs, v=v, u=u)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
fix_u = EssentialBC('fix_u', Bottom, {'u.all' : 0.0})
# bc_fun = Function('shift_u_fun', shift_u_fun,
# extra_args={'shift' : 0.01})
# shift_u = EssentialBC('shift_u', gamma2, {'u.0' : bc_fun})
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({}, lin_solver=ls, status=nls_status)
pb = Problem('elasticity', equations=eqs)
pb.save_regions_as_groups('regions')
pb.set_bcs(ebcs= | Conditions([fix_u]) | sfepy.discrete.conditions.Conditions |
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
| Struct.__init__(self, name=name) | sfepy.base.base.Struct.__init__ |
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
Struct.__init__(self, name=name)
self.n_var = 0
self.var_names = []
self.n_dof = {}
self.ptr = [0]
self.indx = {}
self.details = {}
def _update_after_append(self, name):
self.ptr.append(self.ptr[-1] + self.n_dof[name])
ii = self.n_var
self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
self.n_var += 1
def append_variable(self, var, active=False):
"""
Append DOFs of the given variable.
Parameters
----------
var : Variable instance
The variable to append.
active : bool, optional
When True, only active (non-constrained) DOFs are considered.
"""
name = var.name
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
self._update_after_append(name)
def append_raw(self, name, n_dof):
"""
Append raw DOFs.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = n_dof, None
self._update_after_append(name)
def update(self, name, n_dof):
"""
Set the number of DOFs of the given variable.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if not name in self.var_names:
raise ValueError('variable %s is not present!' % name)
ii = self.var_names.index(name)
delta = n_dof - self.n_dof[name]
self.n_dof[name] = n_dof
for iv, nn in enumerate(self.var_names[ii:]):
self.ptr[ii+iv+1] += delta
self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])
def get_info(self, var_name):
"""
Return information on DOFs of the given variable.
Parameters
----------
var_name : str
The name of the variable.
"""
return Struct(name='%s_dof_info' % var_name,
var_name=var_name,
n_dof=self.n_dof[var_name],
indx=self.indx[var_name],
details=self.details[var_name])
def get_subset_info(self, var_names):
"""
Return global DOF information for selected variables
only. Silently ignores non-existing variable names.
Parameters
----------
var_names : list
The names of the selected variables.
"""
di = DofInfo(self.name + ':subset')
for var_name in var_names:
if var_name not in self.var_names:
continue
di.append_raw(var_name, self.n_dof[var_name])
return di
def get_n_dof_total(self):
"""
Return the total number of DOFs of all state variables.
"""
return self.ptr[-1]
def is_active_bc(bc, ts=None, functions=None):
"""
Check whether the given boundary condition is active in the current
time.
Returns
-------
active : bool
True if the condition `bc` is active.
"""
if (bc.times is None) or (ts is None):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if tt[0] <= ts.time < tt[1]:
active = True
break
else:
active = False
else:
if isinstance(bc.times, basestr):
if functions is not None:
fun = functions[bc.times]
else:
raise ValueError('no functions given for bc %s!' % bc.name)
elif isinstance(bc.times, Function):
fun = bc.times
else:
raise ValueError('unknown times type! (%s)'
% type(bc.times))
active = fun(ts)
return active
class EquationMap(Struct):
"""
Map all DOFs to equations for active DOFs.
"""
def __init__(self, name, dof_names, var_di):
| Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di) | sfepy.base.base.Struct.__init__ |
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
Struct.__init__(self, name=name)
self.n_var = 0
self.var_names = []
self.n_dof = {}
self.ptr = [0]
self.indx = {}
self.details = {}
def _update_after_append(self, name):
self.ptr.append(self.ptr[-1] + self.n_dof[name])
ii = self.n_var
self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
self.n_var += 1
def append_variable(self, var, active=False):
"""
Append DOFs of the given variable.
Parameters
----------
var : Variable instance
The variable to append.
active : bool, optional
When True, only active (non-constrained) DOFs are considered.
"""
name = var.name
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
self._update_after_append(name)
def append_raw(self, name, n_dof):
"""
Append raw DOFs.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = n_dof, None
self._update_after_append(name)
def update(self, name, n_dof):
"""
Set the number of DOFs of the given variable.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if not name in self.var_names:
raise ValueError('variable %s is not present!' % name)
ii = self.var_names.index(name)
delta = n_dof - self.n_dof[name]
self.n_dof[name] = n_dof
for iv, nn in enumerate(self.var_names[ii:]):
self.ptr[ii+iv+1] += delta
self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])
def get_info(self, var_name):
"""
Return information on DOFs of the given variable.
Parameters
----------
var_name : str
The name of the variable.
"""
return Struct(name='%s_dof_info' % var_name,
var_name=var_name,
n_dof=self.n_dof[var_name],
indx=self.indx[var_name],
details=self.details[var_name])
def get_subset_info(self, var_names):
"""
Return global DOF information for selected variables
only. Silently ignores non-existing variable names.
Parameters
----------
var_names : list
The names of the selected variables.
"""
di = DofInfo(self.name + ':subset')
for var_name in var_names:
if var_name not in self.var_names:
continue
di.append_raw(var_name, self.n_dof[var_name])
return di
def get_n_dof_total(self):
"""
Return the total number of DOFs of all state variables.
"""
return self.ptr[-1]
def is_active_bc(bc, ts=None, functions=None):
"""
Check whether the given boundary condition is active in the current
time.
Returns
-------
active : bool
True if the condition `bc` is active.
"""
if (bc.times is None) or (ts is None):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if tt[0] <= ts.time < tt[1]:
active = True
break
else:
active = False
else:
if isinstance(bc.times, basestr):
if functions is not None:
fun = functions[bc.times]
else:
raise ValueError('no functions given for bc %s!' % bc.name)
elif isinstance(bc.times, Function):
fun = bc.times
else:
raise ValueError('unknown times type! (%s)'
% type(bc.times))
active = fun(ts)
return active
class EquationMap(Struct):
"""
Map all DOFs to equations for active DOFs.
"""
def __init__(self, name, dof_names, var_di):
Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di)
self.dpn = len(self.dof_names)
self.eq = nm.arange(var_di.n_dof, dtype=nm.int32)
self.n_dg_ebc = 0
self.dg_ebc_names = {}
self.dg_ebc = {}
self.dg_ebc_val = {}
self.n_dg_epbc = 0
self.dg_epbc_names = []
self.dg_epbc = []
def _init_empty(self, field):
self.val_ebc = nm.empty((0,), dtype=field.dtype)
if field.get('unused_dofs') is None:
self.eqi = nm.arange(self.var_di.n_dof, dtype=nm.int32)
else:
self._mark_unused(field)
self.eqi = nm.compress(self.eq >= 0, self.eq)
self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
self.eq_ebc = nm.empty((0,), dtype=nm.int32)
self.master = nm.empty((0,), dtype=nm.int32)
self.slave = nm.empty((0,), dtype=nm.int32)
self.n_eq = self.eqi.shape[0]
self.n_ebc = self.eq_ebc.shape[0]
self.n_epbc = self.master.shape[0]
def _mark_unused(self, field):
unused_dofs = field.get('unused_dofs')
if unused_dofs is not None:
unused = expand_nodes_to_equations(field.unused_dofs,
self.dof_names, self.dof_names)
self.eq[unused] = -3
def map_equations(self, bcs, field, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Parameters
----------
bcs : Conditions instance
The Dirichlet or periodic boundary conditions (single
condition instances). The dof names in the conditions must
already be canonized.
field : Field instance
The field of the variable holding the DOFs.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The registered functions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
warn : bool, optional
If True, warn about BC on non-existent nodes.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
Notes
-----
- Periodic bc: master and slave DOFs must belong to the same
field (variables can differ, though).
"""
if bcs is None:
self._init_empty(field)
return set()
eq_ebc = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
val_ebc = nm.zeros((self.var_di.n_dof,), dtype=field.dtype)
master_slave = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
chains = []
active_bcs = set()
for bc in bcs:
# Skip conditions that are not active in the current time.
if not is_active_bc(bc, ts=ts, functions=functions):
continue
active_bcs.add(bc.key)
if isinstance(bc, DGEssentialBC):
ntype = "DGEBC"
region = bc.region
elif isinstance(bc, DGPeriodicBC):
ntype = "DGEPBC"
region = bc.regions[0]
elif isinstance(bc, EssentialBC):
ntype = 'EBC'
region = bc.region
elif isinstance(bc, PeriodicBC):
ntype = 'EPBC'
region = bc.regions[0]
if warn:
clean_msg = ('warning: ignoring nonexistent %s node (%s) in '
% (ntype, self.var_di.var_name))
else:
clean_msg = None
# Get master region nodes.
master_nod_list = field.get_dofs_in_region(region)
if len(master_nod_list) == 0:
continue
if ntype == 'EBC': # EBC.
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = | get_condition_value(val, functions, 'EBC', bc.name) | sfepy.discrete.conditions.get_condition_value |
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
Struct.__init__(self, name=name)
self.n_var = 0
self.var_names = []
self.n_dof = {}
self.ptr = [0]
self.indx = {}
self.details = {}
def _update_after_append(self, name):
self.ptr.append(self.ptr[-1] + self.n_dof[name])
ii = self.n_var
self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
self.n_var += 1
def append_variable(self, var, active=False):
"""
Append DOFs of the given variable.
Parameters
----------
var : Variable instance
The variable to append.
active : bool, optional
When True, only active (non-constrained) DOFs are considered.
"""
name = var.name
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
self._update_after_append(name)
def append_raw(self, name, n_dof):
"""
Append raw DOFs.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = n_dof, None
self._update_after_append(name)
def update(self, name, n_dof):
"""
Set the number of DOFs of the given variable.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if not name in self.var_names:
raise ValueError('variable %s is not present!' % name)
ii = self.var_names.index(name)
delta = n_dof - self.n_dof[name]
self.n_dof[name] = n_dof
for iv, nn in enumerate(self.var_names[ii:]):
self.ptr[ii+iv+1] += delta
self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])
def get_info(self, var_name):
"""
Return information on DOFs of the given variable.
Parameters
----------
var_name : str
The name of the variable.
"""
return Struct(name='%s_dof_info' % var_name,
var_name=var_name,
n_dof=self.n_dof[var_name],
indx=self.indx[var_name],
details=self.details[var_name])
def get_subset_info(self, var_names):
"""
Return global DOF information for selected variables
only. Silently ignores non-existing variable names.
Parameters
----------
var_names : list
The names of the selected variables.
"""
di = DofInfo(self.name + ':subset')
for var_name in var_names:
if var_name not in self.var_names:
continue
di.append_raw(var_name, self.n_dof[var_name])
return di
def get_n_dof_total(self):
"""
Return the total number of DOFs of all state variables.
"""
return self.ptr[-1]
def is_active_bc(bc, ts=None, functions=None):
"""
Check whether the given boundary condition is active in the current
time.
Returns
-------
active : bool
True if the condition `bc` is active.
"""
if (bc.times is None) or (ts is None):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if tt[0] <= ts.time < tt[1]:
active = True
break
else:
active = False
else:
if isinstance(bc.times, basestr):
if functions is not None:
fun = functions[bc.times]
else:
raise ValueError('no functions given for bc %s!' % bc.name)
elif isinstance(bc.times, Function):
fun = bc.times
else:
raise ValueError('unknown times type! (%s)'
% type(bc.times))
active = fun(ts)
return active
class EquationMap(Struct):
"""
Map all DOFs to equations for active DOFs.
"""
def __init__(self, name, dof_names, var_di):
Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di)
self.dpn = len(self.dof_names)
self.eq = nm.arange(var_di.n_dof, dtype=nm.int32)
self.n_dg_ebc = 0
self.dg_ebc_names = {}
self.dg_ebc = {}
self.dg_ebc_val = {}
self.n_dg_epbc = 0
self.dg_epbc_names = []
self.dg_epbc = []
def _init_empty(self, field):
self.val_ebc = nm.empty((0,), dtype=field.dtype)
if field.get('unused_dofs') is None:
self.eqi = nm.arange(self.var_di.n_dof, dtype=nm.int32)
else:
self._mark_unused(field)
self.eqi = nm.compress(self.eq >= 0, self.eq)
self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
self.eq_ebc = nm.empty((0,), dtype=nm.int32)
self.master = nm.empty((0,), dtype=nm.int32)
self.slave = nm.empty((0,), dtype=nm.int32)
self.n_eq = self.eqi.shape[0]
self.n_ebc = self.eq_ebc.shape[0]
self.n_epbc = self.master.shape[0]
def _mark_unused(self, field):
unused_dofs = field.get('unused_dofs')
if unused_dofs is not None:
unused = expand_nodes_to_equations(field.unused_dofs,
self.dof_names, self.dof_names)
self.eq[unused] = -3
def map_equations(self, bcs, field, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Parameters
----------
bcs : Conditions instance
The Dirichlet or periodic boundary conditions (single
condition instances). The dof names in the conditions must
already be canonized.
field : Field instance
The field of the variable holding the DOFs.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The registered functions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
warn : bool, optional
If True, warn about BC on non-existent nodes.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
Notes
-----
- Periodic bc: master and slave DOFs must belong to the same
field (variables can differ, though).
"""
if bcs is None:
self._init_empty(field)
return set()
eq_ebc = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
val_ebc = nm.zeros((self.var_di.n_dof,), dtype=field.dtype)
master_slave = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
chains = []
active_bcs = set()
for bc in bcs:
# Skip conditions that are not active in the current time.
if not is_active_bc(bc, ts=ts, functions=functions):
continue
active_bcs.add(bc.key)
if isinstance(bc, DGEssentialBC):
ntype = "DGEBC"
region = bc.region
elif isinstance(bc, DGPeriodicBC):
ntype = "DGEPBC"
region = bc.regions[0]
elif isinstance(bc, EssentialBC):
ntype = 'EBC'
region = bc.region
elif isinstance(bc, PeriodicBC):
ntype = 'EPBC'
region = bc.regions[0]
if warn:
clean_msg = ('warning: ignoring nonexistent %s node (%s) in '
% (ntype, self.var_di.var_name))
else:
clean_msg = None
# Get master region nodes.
master_nod_list = field.get_dofs_in_region(region)
if len(master_nod_list) == 0:
continue
if ntype == 'EBC': # EBC.
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
nods, vv = field.set_dofs(fun, region, len(dofs), clean_msg)
eq = expand_nodes_to_equations(nods, dofs, self.dof_names)
# Duplicates removed here...
eq_ebc[eq] = 1
if vv is not None: val_ebc[eq] = nm.ravel(vv)
elif ntype == "DGEBC":
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = | get_condition_value(val, functions, 'EBC', bc.name) | sfepy.discrete.conditions.get_condition_value |
"""
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
Struct.__init__(self, name=name)
self.n_var = 0
self.var_names = []
self.n_dof = {}
self.ptr = [0]
self.indx = {}
self.details = {}
def _update_after_append(self, name):
self.ptr.append(self.ptr[-1] + self.n_dof[name])
ii = self.n_var
self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
self.n_var += 1
def append_variable(self, var, active=False):
"""
Append DOFs of the given variable.
Parameters
----------
var : Variable instance
The variable to append.
active : bool, optional
When True, only active (non-constrained) DOFs are considered.
"""
name = var.name
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
self._update_after_append(name)
def append_raw(self, name, n_dof):
"""
Append raw DOFs.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = n_dof, None
self._update_after_append(name)
def update(self, name, n_dof):
"""
Set the number of DOFs of the given variable.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if not name in self.var_names:
raise ValueError('variable %s is not present!' % name)
ii = self.var_names.index(name)
delta = n_dof - self.n_dof[name]
self.n_dof[name] = n_dof
for iv, nn in enumerate(self.var_names[ii:]):
self.ptr[ii+iv+1] += delta
self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])
def get_info(self, var_name):
"""
Return information on DOFs of the given variable.
Parameters
----------
var_name : str
The name of the variable.
"""
return Struct(name='%s_dof_info' % var_name,
var_name=var_name,
n_dof=self.n_dof[var_name],
indx=self.indx[var_name],
details=self.details[var_name])
def get_subset_info(self, var_names):
"""
Return global DOF information for selected variables
only. Silently ignores non-existing variable names.
Parameters
----------
var_names : list
The names of the selected variables.
"""
di = DofInfo(self.name + ':subset')
for var_name in var_names:
if var_name not in self.var_names:
continue
di.append_raw(var_name, self.n_dof[var_name])
return di
def get_n_dof_total(self):
"""
Return the total number of DOFs of all state variables.
"""
return self.ptr[-1]
def is_active_bc(bc, ts=None, functions=None):
"""
Check whether the given boundary condition is active in the current
time.
Returns
-------
active : bool
True if the condition `bc` is active.
"""
if (bc.times is None) or (ts is None):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if tt[0] <= ts.time < tt[1]:
active = True
break
else:
active = False
else:
if isinstance(bc.times, basestr):
if functions is not None:
fun = functions[bc.times]
else:
raise ValueError('no functions given for bc %s!' % bc.name)
elif isinstance(bc.times, Function):
fun = bc.times
else:
raise ValueError('unknown times type! (%s)'
% type(bc.times))
active = fun(ts)
return active
class EquationMap(Struct):
"""
Map all DOFs to equations for active DOFs.
"""
def __init__(self, name, dof_names, var_di):
Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di)
self.dpn = len(self.dof_names)
self.eq = nm.arange(var_di.n_dof, dtype=nm.int32)
self.n_dg_ebc = 0
self.dg_ebc_names = {}
self.dg_ebc = {}
self.dg_ebc_val = {}
self.n_dg_epbc = 0
self.dg_epbc_names = []
self.dg_epbc = []
def _init_empty(self, field):
self.val_ebc = nm.empty((0,), dtype=field.dtype)
if field.get('unused_dofs') is None:
self.eqi = nm.arange(self.var_di.n_dof, dtype=nm.int32)
else:
self._mark_unused(field)
self.eqi = nm.compress(self.eq >= 0, self.eq)
self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
self.eq_ebc = nm.empty((0,), dtype=nm.int32)
self.master = nm.empty((0,), dtype=nm.int32)
self.slave = nm.empty((0,), dtype=nm.int32)
self.n_eq = self.eqi.shape[0]
self.n_ebc = self.eq_ebc.shape[0]
self.n_epbc = self.master.shape[0]
def _mark_unused(self, field):
unused_dofs = field.get('unused_dofs')
if unused_dofs is not None:
unused = expand_nodes_to_equations(field.unused_dofs,
self.dof_names, self.dof_names)
self.eq[unused] = -3
def map_equations(self, bcs, field, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Parameters
----------
bcs : Conditions instance
The Dirichlet or periodic boundary conditions (single
condition instances). The dof names in the conditions must
already be canonized.
field : Field instance
The field of the variable holding the DOFs.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The registered functions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
warn : bool, optional
If True, warn about BC on non-existent nodes.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
Notes
-----
- Periodic bc: master and slave DOFs must belong to the same
field (variables can differ, though).
"""
if bcs is None:
self._init_empty(field)
return set()
eq_ebc = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
val_ebc = nm.zeros((self.var_di.n_dof,), dtype=field.dtype)
master_slave = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
chains = []
active_bcs = set()
for bc in bcs:
# Skip conditions that are not active in the current time.
if not is_active_bc(bc, ts=ts, functions=functions):
continue
active_bcs.add(bc.key)
if isinstance(bc, DGEssentialBC):
ntype = "DGEBC"
region = bc.region
elif isinstance(bc, DGPeriodicBC):
ntype = "DGEPBC"
region = bc.regions[0]
elif isinstance(bc, EssentialBC):
ntype = 'EBC'
region = bc.region
elif isinstance(bc, PeriodicBC):
ntype = 'EPBC'
region = bc.regions[0]
if warn:
clean_msg = ('warning: ignoring nonexistent %s node (%s) in '
% (ntype, self.var_di.var_name))
else:
clean_msg = None
# Get master region nodes.
master_nod_list = field.get_dofs_in_region(region)
if len(master_nod_list) == 0:
continue
if ntype == 'EBC': # EBC.
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
nods, vv = field.set_dofs(fun, region, len(dofs), clean_msg)
eq = expand_nodes_to_equations(nods, dofs, self.dof_names)
# Duplicates removed here...
eq_ebc[eq] = 1
if vv is not None: val_ebc[eq] = nm.ravel(vv)
elif ntype == "DGEBC":
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
values = field.get_bc_facet_values(fun, region, diff=bc.diff)
bc2bfi = field.get_bc_facet_idx(region)
self.dg_ebc_val.setdefault(bc.diff, []).append(values)
self.dg_ebc.setdefault(bc.diff, []).append(bc2bfi)
self.n_dg_ebc += 1
elif ntype == "DGEPBC":
# ensure matching boundaries?
master_bc2bfi = field.get_bc_facet_idx(region)
slave_bc2bfi = field.get_bc_facet_idx(bc.regions[1])
self.dg_epbc.append((master_bc2bfi, slave_bc2bfi))
self.n_dg_epbc += 1
else: # EPBC.
region = bc.regions[1]
slave_nod_list = field.get_dofs_in_region(region)
nmaster = nm.unique(master_nod_list)
# Treat fields not covering the whole domain.
if nmaster[0] == -1:
nmaster = nmaster[1:]
nslave = nm.unique(slave_nod_list)
# Treat fields not covering the whole domain.
if nslave[0] == -1:
nslave = nslave[1:]
## print nmaster + 1
## print nslave + 1
if nmaster.shape != nslave.shape:
msg = 'EPBC list lengths do not match!\n(%s,\n %s)' %\
(nmaster, nslave)
raise ValueError(msg)
if (nmaster.shape[0] == 0) and (nslave.shape[0] == 0):
continue
mcoor = field.get_coor(nmaster)
scoor = field.get_coor(nslave)
fun = | get_condition_value(bc.match, functions, 'EPBC', bc.name) | sfepy.discrete.conditions.get_condition_value |
"""
Reference-physical domain mappings.
"""
import numpy as nm
from sfepy.base.base import Struct
class PhysicalQPs(Struct):
"""
Physical quadrature points in a region.
"""
def __init__(self, igs, n_total=0, is_uniform=True):
Struct.__init__(self, igs=igs, n_total=n_total, indx={}, rindx={},
n_per_group={}, shape={}, values={},
is_uniform=is_uniform)
for ig in self.igs:
self.indx[ig] = slice(None)
self.rindx[ig] = slice(None)
self.n_per_group[ig] = 0
self.shape[ig] = (0, 0, 0)
self.values[ig] = nm.empty(self.shape[ig], dtype=nm.float64)
def get_merged_values(self):
qps = nm.concatenate([self.values[ig] for ig in self.igs], axis=0)
return qps
def get_shape(self, rshape, ig=None):
"""
Get shape from raveled shape.
"""
if ig is None:
if self.is_uniform:
n_qp = self.shape[self.igs[0]][1]
else:
msg = 'ig argument must be given for non-uniform QPs!'
raise ValueError(msg)
else:
n_qp = self.shape[ig][1]
if (rshape[0] / n_qp) * n_qp != rshape[0]:
raise ValueError('incompatible shapes! (n_qp: %d, %s)'
% (n_qp, rshape))
shape = (rshape[0] / n_qp, n_qp) + rshape[1:]
return shape
class Mapping(Struct):
"""
Base class for mappings.
"""
@staticmethod
def from_args(region, kind='v', ig=None):
"""
Create mapping from reference to physical entities in a given
region, given the integration kind ('v' or 's').
This mapping can be used to compute the physical quadrature
points.
Parameters
----------
region : Region instance
The region defining the entities.
kind : 'v' or 's'
The kind of the entities: 'v' - cells, 's' - facets.
ig : int, optional
The group index.
Returns
-------
mapping : VolumeMapping or SurfaceMapping instance
The requested mapping.
"""
from sfepy.discrete.fem.domain import FEDomain
from sfepy.discrete.iga.domain import IGDomain
if isinstance(region.domain, FEDomain):
import sfepy.discrete.fem.mappings as mm
coors = region.domain.get_mesh_coors()
if kind == 's':
coors = coors[region.vertices]
gel = region.domain.groups[ig].gel
conn = region.domain.groups[ig].conn
if kind == 'v':
cells = region.get_cells(ig)
mapping = | mm.VolumeMapping(coors, conn[cells], gel=gel) | sfepy.discrete.iga.mappings.VolumeMapping |
"""
Reference-physical domain mappings.
"""
import numpy as nm
from sfepy.base.base import Struct
class PhysicalQPs(Struct):
"""
Physical quadrature points in a region.
"""
def __init__(self, igs, n_total=0, is_uniform=True):
Struct.__init__(self, igs=igs, n_total=n_total, indx={}, rindx={},
n_per_group={}, shape={}, values={},
is_uniform=is_uniform)
for ig in self.igs:
self.indx[ig] = slice(None)
self.rindx[ig] = slice(None)
self.n_per_group[ig] = 0
self.shape[ig] = (0, 0, 0)
self.values[ig] = nm.empty(self.shape[ig], dtype=nm.float64)
def get_merged_values(self):
qps = nm.concatenate([self.values[ig] for ig in self.igs], axis=0)
return qps
def get_shape(self, rshape, ig=None):
"""
Get shape from raveled shape.
"""
if ig is None:
if self.is_uniform:
n_qp = self.shape[self.igs[0]][1]
else:
msg = 'ig argument must be given for non-uniform QPs!'
raise ValueError(msg)
else:
n_qp = self.shape[ig][1]
if (rshape[0] / n_qp) * n_qp != rshape[0]:
raise ValueError('incompatible shapes! (n_qp: %d, %s)'
% (n_qp, rshape))
shape = (rshape[0] / n_qp, n_qp) + rshape[1:]
return shape
class Mapping(Struct):
"""
Base class for mappings.
"""
@staticmethod
def from_args(region, kind='v', ig=None):
"""
Create mapping from reference to physical entities in a given
region, given the integration kind ('v' or 's').
This mapping can be used to compute the physical quadrature
points.
Parameters
----------
region : Region instance
The region defining the entities.
kind : 'v' or 's'
The kind of the entities: 'v' - cells, 's' - facets.
ig : int, optional
The group index.
Returns
-------
mapping : VolumeMapping or SurfaceMapping instance
The requested mapping.
"""
from sfepy.discrete.fem.domain import FEDomain
from sfepy.discrete.iga.domain import IGDomain
if isinstance(region.domain, FEDomain):
import sfepy.discrete.fem.mappings as mm
coors = region.domain.get_mesh_coors()
if kind == 's':
coors = coors[region.vertices]
gel = region.domain.groups[ig].gel
conn = region.domain.groups[ig].conn
if kind == 'v':
cells = region.get_cells(ig)
mapping = mm.VolumeMapping(coors, conn[cells], gel=gel)
elif kind == 's':
from sfepy.discrete.fem.fe_surface import FESurface
aux = FESurface('aux', region, gel.get_surface_entities(),
conn , ig)
mapping = mm.SurfaceMapping(coors, aux.leconn,
gel=gel.surface_facet)
elif isinstance(region.domain, IGDomain):
import sfepy.discrete.iga.mappings as mm
mapping = | mm.IGMapping(region.domain, region.cells) | sfepy.discrete.iga.mappings.IGMapping |
# This example implements 2nd-level homogenization of Biot-Darcy-Brinkman model of flow in deformable
# double porous media.
# The mathematical model is described in:
#
#<NAME>., <NAME>., <NAME>.
#The Biot-Darcy-Brinkman model of flow in deformable double porous media; homogenization and numerical modelling.
# Computers and Mathematics with applications, 78(9):3044-3066, 2019,
# https://doi.org/10.1016/j.camwa.2019.04.004
#
# Run calculation of homogenized coefficients:
#
# ./homogen.py example_perfusion_BDB/perf_BDB_mes.py
#
# The results are stored in `example_perfusion_BDB/results/meso` directory.
#
import numpy as nm
from sfepy import data_dir
import os.path as osp
from sfepy.discrete.fem.mesh import Mesh
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.tensors import dim2sym
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy.homogenization.micmac import get_homog_coefs_linear
data_dir = 'example_perfusion_BDB'
def coefs2qp(coefs, nqp):
out = {}
for k, v in coefs.items():
if type(v) not in [nm.ndarray, float]:
continue
if type(v) is nm.ndarray:
if len(v.shape) >= 3:
out[k] = v
out[k] = nm.tile(v, (nqp, 1, 1))
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# get homogenized coefficients, recalculate them if necessary
def get_homog(coors, mode, pb,micro_filename, **kwargs):
if not (mode == 'qp'):
return
nqp = coors.shape[0]
coefs_filename = 'coefs_micro'
coefs_filename = osp.join(pb.conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
coefs = get_homog_coefs_linear(0, 0, None,
micro_filename=micro_filename,coefs_filename = coefs_filename )
coefs['B'] = coefs['B'][:, nm.newaxis]
for k in coefs.keys():
v = coefs[k]
if type(v) is nm.ndarray:
if len(v.shape) == 0:
coefs[k] = v.reshape((1, 1))
elif len(v.shape) == 1:
coefs[k] = v[:, nm.newaxis]
elif isinstance(v, float):
coefs[k] = nm.array([[v]])
out = coefs2qp(coefs, nqp)
return out
def define(filename_mesh=None):
eta = 3.6e-3
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'meso_perf_puc.vtk')
mesh = | Mesh.from_file(filename_mesh) | sfepy.discrete.fem.mesh.Mesh.from_file |
# This example implements 2nd-level homogenization of Biot-Darcy-Brinkman model of flow in deformable
# double porous media.
# The mathematical model is described in:
#
#<NAME>., <NAME>., <NAME>.
#The Biot-Darcy-Brinkman model of flow in deformable double porous media; homogenization and numerical modelling.
# Computers and Mathematics with applications, 78(9):3044-3066, 2019,
# https://doi.org/10.1016/j.camwa.2019.04.004
#
# Run calculation of homogenized coefficients:
#
# ./homogen.py example_perfusion_BDB/perf_BDB_mes.py
#
# The results are stored in `example_perfusion_BDB/results/meso` directory.
#
import numpy as nm
from sfepy import data_dir
import os.path as osp
from sfepy.discrete.fem.mesh import Mesh
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.tensors import dim2sym
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy.homogenization.micmac import get_homog_coefs_linear
data_dir = 'example_perfusion_BDB'
def coefs2qp(coefs, nqp):
out = {}
for k, v in coefs.items():
if type(v) not in [nm.ndarray, float]:
continue
if type(v) is nm.ndarray:
if len(v.shape) >= 3:
out[k] = v
out[k] = nm.tile(v, (nqp, 1, 1))
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# get homogenized coefficients, recalculate them if necessary
def get_homog(coors, mode, pb,micro_filename, **kwargs):
if not (mode == 'qp'):
return
nqp = coors.shape[0]
coefs_filename = 'coefs_micro'
coefs_filename = osp.join(pb.conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
coefs = get_homog_coefs_linear(0, 0, None,
micro_filename=micro_filename,coefs_filename = coefs_filename )
coefs['B'] = coefs['B'][:, nm.newaxis]
for k in coefs.keys():
v = coefs[k]
if type(v) is nm.ndarray:
if len(v.shape) == 0:
coefs[k] = v.reshape((1, 1))
elif len(v.shape) == 1:
coefs[k] = v[:, nm.newaxis]
elif isinstance(v, float):
coefs[k] = nm.array([[v]])
out = coefs2qp(coefs, nqp)
return out
def define(filename_mesh=None):
eta = 3.6e-3
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'meso_perf_puc.vtk')
mesh = Mesh.from_file(filename_mesh)
poroela_micro_file = osp.join(data_dir, 'perf_BDB_mic.py')
dim = 3
sym = (dim + 1) * dim // 2
sym_eye = 'nm.array([1,1,0])' if dim == 2 else 'nm.array([1,1,1,0,0,0])'
bbox = mesh.get_bounding_box()
regions = | define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-3) | sfepy.homogenization.utils.define_box_regions |
# This example implements 2nd-level homogenization of Biot-Darcy-Brinkman model of flow in deformable
# double porous media.
# The mathematical model is described in:
#
#<NAME>., <NAME>., <NAME>.
#The Biot-Darcy-Brinkman model of flow in deformable double porous media; homogenization and numerical modelling.
# Computers and Mathematics with applications, 78(9):3044-3066, 2019,
# https://doi.org/10.1016/j.camwa.2019.04.004
#
# Run calculation of homogenized coefficients:
#
# ./homogen.py example_perfusion_BDB/perf_BDB_mes.py
#
# The results are stored in `example_perfusion_BDB/results/meso` directory.
#
import numpy as nm
from sfepy import data_dir
import os.path as osp
from sfepy.discrete.fem.mesh import Mesh
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.tensors import dim2sym
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy.homogenization.micmac import get_homog_coefs_linear
data_dir = 'example_perfusion_BDB'
def coefs2qp(coefs, nqp):
out = {}
for k, v in coefs.items():
if type(v) not in [nm.ndarray, float]:
continue
if type(v) is nm.ndarray:
if len(v.shape) >= 3:
out[k] = v
out[k] = nm.tile(v, (nqp, 1, 1))
return out
def get_periodic_bc(var_tab, dim=3, dim_tab=None):
if dim_tab is None:
dim_tab = {'x': ['left', 'right'],
'z': ['bottom', 'top'],
'y': ['near', 'far']}
periodic = {}
epbcs = {}
for ivar, reg in var_tab:
periodic['per_%s' % ivar] = pers = []
for idim in 'xyz'[0:dim]:
key = 'per_%s_%s' % (ivar, idim)
regs = ['%s_%s' % (reg, ii) for ii in dim_tab[idim]]
epbcs[key] = (regs, {'%s.all' % ivar: '%s.all' % ivar},
'match_%s_plane' % idim)
pers.append(key)
return epbcs, periodic
# get homogenized coefficients, recalculate them if necessary
def get_homog(coors, mode, pb,micro_filename, **kwargs):
if not (mode == 'qp'):
return
nqp = coors.shape[0]
coefs_filename = 'coefs_micro'
coefs_filename = osp.join(pb.conf.options.get('output_dir', '.'),
coefs_filename) + '.h5'
coefs = get_homog_coefs_linear(0, 0, None,
micro_filename=micro_filename,coefs_filename = coefs_filename )
coefs['B'] = coefs['B'][:, nm.newaxis]
for k in coefs.keys():
v = coefs[k]
if type(v) is nm.ndarray:
if len(v.shape) == 0:
coefs[k] = v.reshape((1, 1))
elif len(v.shape) == 1:
coefs[k] = v[:, nm.newaxis]
elif isinstance(v, float):
coefs[k] = nm.array([[v]])
out = coefs2qp(coefs, nqp)
return out
def define(filename_mesh=None):
eta = 3.6e-3
if filename_mesh is None:
filename_mesh = osp.join(data_dir, 'meso_perf_puc.vtk')
mesh = Mesh.from_file(filename_mesh)
poroela_micro_file = osp.join(data_dir, 'perf_BDB_mic.py')
dim = 3
sym = (dim + 1) * dim // 2
sym_eye = 'nm.array([1,1,0])' if dim == 2 else 'nm.array([1,1,1,0,0,0])'
bbox = mesh.get_bounding_box()
regions = define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-3)
regions.update({
'Z': 'all',
'Gamma_Z': ('vertices of surface', 'facet'),
# matrix
'Zm': 'cells of group 1',
'Zm_left': ('r.Zm *v r.Left', 'vertex'),
'Zm_right': ('r.Zm *v r.Right', 'vertex'),
'Zm_bottom': ('r.Zm *v r.Bottom', 'vertex'),
'Zm_top': ('r.Zm *v r.Top', 'vertex'),
'Gamma_Zm': ('r.Zm *v r.Zc', 'facet', 'Zm'),
# canal
'Zc': 'cells of group 2',
'Zc0': ('r.Zc -v r.Gamma_Zc', 'vertex'),
'Zc_left': ('r.Zc0 *v r.Left', 'vertex'),
'Zc_right': ('r.Zc0 *v r.Right', 'vertex'),
'Zc_bottom': ('r.Zc0 *v r.Bottom', 'vertex'),
'Zc_top': ('r.Zc0 *v r.Top', 'vertex'),
'Gamma_Zc': ('r.Zm *v r.Zc', 'facet', 'Zc'),
"Surface": ("vertices of surface", "facet"),
'Center_c': ('vertex 5346', 'vertex'), # canal center
})
if dim == 3:
regions.update({
'Zm_far': ('r.Zm *v r.Far', 'vertex'),
'Zm_near': ('r.Zm *v r.Near', 'vertex'),
'Zc_far': ('r.Zc0 *v r.Far', 'vertex'),
'Zc_near': ('r.Zc0 *v r.Near', 'vertex'),
})
fields = {
'one': ('real', 'scalar', 'Z', 1),
'displacement': ('real', 'vector', 'Zm', 1),
'pressure_m': ('real', 'scalar', 'Zm', 1),
'pressure_c': ('real', 'scalar', 'Zc', 1),
'displacement_c': ('real', 'vector', 'Zc', 1),
'velocity': ('real', 'vector', 'Zc', 2),
}
variables = {
# displacement
'u': ('unknown field', 'displacement', 0),
'v': ('test field', 'displacement', 'u'),
'Pi_u': ('parameter field', 'displacement', 'u'),
'U1': ('parameter field', 'displacement', '(set-to-None)'),
'U2': ('parameter field', 'displacement', '(set-to-None)'),
'uc': ('unknown field', 'displacement_c', 4),
'vc': ('test field', 'displacement_c', 'uc'),
# velocity
'w': ('unknown field', 'velocity', 1),
'z': ('test field', 'velocity', 'w'),
'Pi_w': ('parameter field', 'velocity', 'w'),
'W1': ('parameter field', 'velocity', '(set-to-None)'),
'W2': ('parameter field', 'velocity', '(set-to-None)'),
# pressure
'pm': ('unknown field', 'pressure_m', 2),
'qm': ('test field', 'pressure_m', 'pm'),
'Pm1': ('parameter field', 'pressure_m', '(set-to-None)'),
'Pm2': ('parameter field', 'pressure_m', '(set-to-None)'),
'Pi_pm': ('parameter field', 'pressure_m', 'pm'),
'pc': ('unknown field', 'pressure_c', 3),
'qc': ('test field', 'pressure_c', 'pc'),
'Pc1': ('parameter field', 'pressure_c', '(set-to-None)'),
'Pc2': ('parameter field', 'pressure_c', '(set-to-None)'),
# one
'one': ('parameter field', 'one', '(set-to-None)'),
}
functions = {
'match_x_plane': (per.match_x_plane,),
'match_y_plane': (per.match_y_plane,),
'match_z_plane': (per.match_z_plane,),
'get_homog': (lambda ts, coors, mode=None, problem=None, **kwargs:\
get_homog(coors, mode, problem, poroela_micro_file, **kwargs),),
}
materials = {
'hmatrix': 'get_homog',
'fluid': ({
'eta_c': eta* nm.eye( | dim2sym(dim) | sfepy.mechanics.tensors.dim2sym |
import os
import numpy as nm
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
test.join = lambda x: os.path.join(test.options.out_dir, x)
return test
def test_linearization(self):
from sfepy.base.base import Struct
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy import data_dir
geometries = ['2_3', '2_4', '3_4', '3_8']
approx_orders = [1, 2]
funs = [nm.cos, nm.sin, lambda x: x]
ok = True
for geometry in geometries:
name = os.path.join(data_dir,
'meshes/elements/%s_1.mesh' % geometry)
mesh = | Mesh.from_file(name) | sfepy.discrete.fem.Mesh.from_file |
import os
import numpy as nm
from sfepy.base.testing import TestCommon
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
test.join = lambda x: os.path.join(test.options.out_dir, x)
return test
def test_linearization(self):
from sfepy.base.base import Struct
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy import data_dir
geometries = ['2_3', '2_4', '3_4', '3_8']
approx_orders = [1, 2]
funs = [nm.cos, nm.sin, lambda x: x]
ok = True
for geometry in geometries:
name = os.path.join(data_dir,
'meshes/elements/%s_1.mesh' % geometry)
mesh = Mesh.from_file(name)
domain = | FEDomain('', mesh) | sfepy.discrete.fem.FEDomain |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = | Domain('d1', m1) | sfepy.fem.Domain |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = | Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2]) | sfepy.fem.Field |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = | Domain('d2', m2) | sfepy.fem.Domain |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = | Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2]) | sfepy.fem.Field |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf( | transform_variables(variables) | sfepy.base.conf.transform_variables |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf( | transform_variables(variables) | sfepy.base.conf.transform_variables |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = | Mesh('source mesh', data_dir + '/meshes/3d/block.mesh') | sfepy.fem.Mesh |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = | Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh') | sfepy.fem.Mesh |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = | Domain('d1', m1) | sfepy.fem.Domain |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
field1 = | Field('scalar_tp', nm.float64, (1,1), omega1, approx_order=1) | sfepy.fem.Field |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
field1 = Field('scalar_tp', nm.float64, (1,1), omega1, approx_order=1)
ff1 = {field1.name : field1}
d2 = | Domain('d2', m2) | sfepy.fem.Domain |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh'),
}
datas = {}
for key, mesh in meshes.iteritems():
bbox = mesh.get_bounding_box()
nx = bbox[1,0] - bbox[0,0]
centre = 0.5 * bbox.sum(axis=0)
mesh.coors -= centre
data = nm.sin(4.0 * nm.pi * mesh.coors[:,0:1] / nx)
datas['scalar_' + key] = data
data = nm.zeros_like(mesh.coors)
data[:,0] = 0.05 * nx * nm.sin(4.0 * nm.pi * mesh.coors[:,0] / nx)
data[:,2] = 0.05 * nx * nm.cos(4.0 * nm.pi * mesh.coors[:,0] / nx)
datas['vector_' + key] = data
for field_name in ['scalar_si', 'vector_si', 'scalar_tp', 'vector_tp']:
m1 = meshes[field_name[-2:]]
for ia, angle in enumerate(nm.linspace(0.0, nm.pi, 11)):
self.report('%s: %d. angle: %f' % (field_name, ia, angle))
shift = [0.0, 0.0, 0.0]
mtx = make_axis_rotation_matrix([0, 1, 0], angle)
m2 = m1.copy('rotated mesh')
m2.transform_coors(mtx)
data = datas[field_name]
u1, u2 = do_interpolation(m2, m1, data, field_name)
if ia == 0:
u1.save_as_mesh(fname('test_mesh_interp_%s_u1.vtk'
% field_name))
u2.save_as_mesh(fname('test_mesh_interp_%s_u2.%03d.vtk'
% (field_name, ia)))
return True
def test_interpolation_two_meshes(self):
from sfepy import data_dir
from sfepy.fem import Mesh, Domain, Field, Variables
m1 = Mesh('source mesh', data_dir + '/meshes/3d/block.mesh')
m2 = Mesh('target mesh', data_dir + '/meshes/3d/cube_medium_tetra.mesh')
m2.coors *= 2.0
bbox = m1.get_bounding_box()
dd = bbox[1,:] - bbox[0,:]
data = nm.sin(4.0 * nm.pi * m1.coors[:,0:1] / dd[0]) \
* nm.cos(4.0 * nm.pi * m1.coors[:,1:2] / dd[1])
variables1 = {
'u' : ('unknown field', 'scalar_tp', 0),
'v' : ('test field', 'scalar_tp', 'u'),
}
variables2 = {
'u' : ('unknown field', 'scalar_si', 0),
'v' : ('test field', 'scalar_si', 'u'),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
field1 = Field('scalar_tp', nm.float64, (1,1), omega1, approx_order=1)
ff1 = {field1.name : field1}
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = | Field('scalar_si', nm.float64, (1,1), omega2, approx_order=0) | sfepy.fem.Field |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : | Mesh('original mesh', data_dir + '/meshes/3d/block.mesh') | sfepy.fem.Mesh |
import os.path as op
from sfepy.base.base import *
from sfepy.base.conf import transform_variables, transform_fields
from sfepy.base.testing import TestCommon
variables = {
'u' : ('unknown field', 'f', 0),
'v' : ('test field', 'f', 'u'),
}
def in_dir(adir):
return lambda x: op.join(adir, x)
def do_interpolation(m2, m1, data, field_name):
"""Interpolate data from m1 to m2. """
from sfepy.fem import Domain, Field, Variables
fields = {
'scalar_si' : ((1,1), 'Omega', 2),
'vector_si' : ((3,1), 'Omega', 2),
'scalar_tp' : ((1,1), 'Omega', 1),
'vector_tp' : ((3,1), 'Omega', 1),
}
d1 = Domain('d1', m1)
omega1 = d1.create_region('Omega', 'all')
f = fields[field_name]
field1 = Field('f', nm.float64, f[0], d1.regions[f[1]], approx_order=f[2])
ff = {field1.name : field1}
vv = Variables.from_conf(transform_variables(variables), ff)
u1 = vv['u']
u1.set_from_mesh_vertices(data)
d2 = Domain('d2', m2)
omega2 = d2.create_region('Omega', 'all')
field2 = Field('f', nm.float64, f[0], d2.regions[f[1]], approx_order=f[2])
ff2 = {field2.name : field2}
vv2 = Variables.from_conf(transform_variables(variables), ff2)
u2 = vv2['u']
# Performs interpolation, if other field differs from self.field
# or, in particular, is defined on a different mesh.
u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)
return u1, u2
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
test = Test(conf=conf, options=options)
return test
def test_interpolation(self):
from sfepy import data_dir
from sfepy.fem import Mesh
from sfepy.linalg import make_axis_rotation_matrix
fname = in_dir(self.options.out_dir)
meshes = {
'tp' : Mesh('original mesh', data_dir + '/meshes/3d/block.mesh'),
'si' : | Mesh('original mesh', data_dir + '/meshes/3d/cylinder.mesh') | sfepy.fem.Mesh |