prompt
stringlengths 174
59.5k
| completion
stringlengths 7
228
| api
stringlengths 12
64
|
---|---|---|
"""
Functions to visualize the geometry elements and numbering and orientation of
their facets (edges and faces).
The standard geometry elements can be plotted by running::
$ python sfepy/postprocess/plot_facets.py
"""
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.linalg import (get_perpendiculars, normalize_vectors,
make_axis_rotation_matrix)
from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs
def plot_geometry(ax, gel, show=False):
    """
    Plot a geometry element as a wireframe, with its global DOF numbers.

    The wireframe is drawn first; `show` is forwarded only to the DOF
    plot, so the figure is displayed (if requested) after both layers
    are drawn.
    """
    ax = plot_mesh(ax, gel.coors, [gel.conn], gel.edges, show=False)
    return plot_global_dofs(ax, gel.coors, [gel.conn], show=show)
def plot_edges(ax, gel, length, show=False):
    """
    Plot edges of a geometry element as numbered arrows.

    Each edge is drawn as an arrow spanning `length` around the edge
    midpoint and labelled with the edge index.
    """
    dim = gel.dim
    ax = _get_axes(ax, dim)

    half = 0.5 * length
    for iedge, edge in enumerate(gel.edges):
        pts = gel.coors[edge]
        mid = 0.5 * pts.sum(axis=0)

        # Unit directions from the midpoint towards the edge end points.
        directions = pts - mid
        normalize_vectors(directions)

        arrow_pts = half * directions + mid
        draw_arrow(ax, arrow_pts, length=0.3*length, linewidth=3, color='b')

        if dim == 3:
            label_args = tuple(mid) + (iedge,)
        else:
            cx, cy = mid
            label_args = (cx, cy, iedge)
        ax.text(*label_args, color='b', fontsize=10, weight='light')

    return ax
def plot_faces(ax, gel, radius, n_point, show=False):
"""
Plot faces of a 3D geometry element as numbered oriented arcs. An arc
centre corresponds to the first node of a face. It points from the first
edge towards the last edge of the face.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
if dim < 3: return ax
for ii, face in enumerate(gel.faces):
cc = gel.coors[face]
t1 = cc[1, :] - cc[0, :]
t2 = cc[-1, :] - cc[0, :]
n = nm.cross(t1, t2)
nt1 = nm.linalg.norm(t1)
nt2 = nm.linalg.norm(t2)
angle = nm.arccos(nm.dot(t1, t2) / (nt1 * nt2))
da = angle / (n_point - 1)
mtx = | make_axis_rotation_matrix(n, da) | sfepy.linalg.make_axis_rotation_matrix |
"""
Functions to visualize the geometry elements and numbering and orientation of
their facets (edges and faces).
The standard geometry elements can be plotted by running::
$ python sfepy/postprocess/plot_facets.py
"""
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.linalg import (get_perpendiculars, normalize_vectors,
make_axis_rotation_matrix)
from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs
def plot_geometry(ax, gel, show=False):
"""
Plot a geometry element as a wireframe.
"""
ax = plot_mesh(ax, gel.coors, [gel.conn], gel.edges, show=False)
ax = plot_global_dofs(ax, gel.coors, [gel.conn], show=show)
return ax
def plot_edges(ax, gel, length, show=False):
"""
Plot edges of a geometry element as numbered arrows.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
l2 = 0.5 * length
for ii, edge in enumerate(gel.edges):
cc = gel.coors[edge]
centre = 0.5 * cc.sum(axis=0)
vdir = (cc - centre)
normalize_vectors(vdir)
cc = l2 * vdir + centre
draw_arrow(ax, cc, length=0.3*length, linewidth=3, color='b')
if dim == 3:
cx, cy, cz = centre
ax.text(cx, cy, cz, ii,
color='b', fontsize=10, weight='light')
else:
cx, cy = centre
ax.text(cx, cy, ii,
color='b', fontsize=10, weight='light')
return ax
def plot_faces(ax, gel, radius, n_point, show=False):
    """
    Plot faces of a 3D geometry element as numbered oriented arcs.

    An arc centre corresponds to the first node of a face. It points from
    the first edge towards the last edge of the face.

    Parameters
    ----------
    ax : axes object or None
        The axes to draw into; created for `gel.dim` when None.
    gel : geometry element
        The element whose faces are plotted; for 2D elements nothing is
        drawn.
    radius : float
        The arc radius, measured from the first face node.
    n_point : int
        The number of points on each arc (must be >= 2).
    show : bool
        Unused; kept for interface compatibility with the other plot
        functions.

    Returns
    -------
    ax : axes object
    """
    dim = gel.dim
    ax = _get_axes(ax, dim)

    # Faces exist only for 3D elements.
    if dim < 3: return ax

    for ii, face in enumerate(gel.faces):
        cc = gel.coors[face]

        # Tangents from the first face node towards its two face
        # neighbours.
        t1 = cc[1, :] - cc[0, :]
        t2 = cc[-1, :] - cc[0, :]

        # Rotation axis normal to the face.
        # NOTE(review): the axis is not normalized here - confirm that
        # make_axis_rotation_matrix() accepts a non-unit direction.
        n = nm.cross(t1, t2)

        nt1 = nm.linalg.norm(t1)
        nt2 = nm.linalg.norm(t2)

        # Total arc angle between the tangents, split into equal steps.
        angle = nm.arccos(nm.dot(t1, t2) / (nt1 * nt2))
        da = angle / (n_point - 1)
        mtx = make_axis_rotation_matrix(n, da)

        # Generate the arc by repeatedly rotating the starting point
        # about the first face node.
        rt = cc[0] + radius * t1 / nt1
        coors = [rt]
        for ip in range(n_point - 1):
            rt = nm.dot(mtx.T, (rt - cc[0])) + cc[0]
            coors.append(rt)
        coors = nm.array(coors, dtype=nm.float64)
        centre = coors.sum(axis=0) / coors.shape[0]

        draw_arrow(ax, coors, length=0.3*radius, linewidth=3, color='r')

        # The early return above guarantees dim == 3 here, so the former
        # 2D labelling branch was unreachable and has been removed.
        cx, cy, cz = centre
        ax.text(cx, cy, cz, ii,
                color='r', fontsize=10, weight='light')

    return ax
def draw_arrow(ax, coors, angle=20.0, length=0.3, **kwargs):
    """
    Draw a line ended with an arrow head, in 2D or 3D.

    Parameters
    ----------
    ax : axes object
        The 2D or 3D axes to draw into.
    coors : array
        The polyline points, shape (n_point, 2) or (n_point, 3). The
        arrow head sits at the last point, oriented along the last
        segment.
    angle : float
        The arrow head opening angle in degrees, capped at 60.
    length : float
        The arrow head length.
    **kwargs : dict
        Line style arguments passed to `ax.plot()`; 'color' (default
        'b') is also used for the arrow head.
    """
    color = kwargs.get('color', 'b')

    # Unit direction of the last segment - the head orientation.
    c0 = coors[-2]
    c1 = coors[-1]

    vd = c1 - c0
    nvd = nm.linalg.norm(vd)
    vd /= nvd

    # Base point of the arrow head, `length` back from the tip.
    c0 = c1 - length * vd

    ps = get_perpendiculars(vd)

    rangle = nm.deg2rad(min(angle, 60.0))
    # Half-width of the arrow head.
    # NOTE(review): tan(rangle) would be the exact half-width for the
    # given opening angle; arctan only approximates tan for small
    # angles - confirm the intent.
    plength = length * nm.arctan(rangle)

    if coors.shape[1] == 2:
        from matplotlib.patches import Polygon

        cx, cy = coors[:, 0], coors[:, 1]
        ax.plot(cx, cy, **kwargs)

        # Triangular head: two offset base points plus the tip.
        p0 = c0 + plength * ps
        p1 = c0 - plength * ps
        pol = Polygon([p0, p1, c1], color=color)
        ax.add_artist(pol)

    else:
        import mpl_toolkits.mplot3d as plt3

        cx, cy, cz = coors[:, 0], coors[:, 1], coors[:, 2]
        ax.plot(cx, cy, cz, **kwargs)

        # Two orthogonal triangles approximate a 3D arrow head.
        p00 = c0 + plength * ps[0]
        p01 = c0 - plength * ps[0]
        p10 = c0 + plength * ps[1]
        p11 = c0 - plength * ps[1]
        arr = plt3.art3d.Poly3DCollection([[p00, p01, c1],
                                           [p10, p11, c1]], color=color)
        ax.add_collection3d(arr)
if __name__ == '__main__':
from sfepy.discrete.fem.geometry_element import GeometryElement, geometry_data
for key, gd in geometry_data.iteritems():
if key == '1_2' : continue
gel = | GeometryElement(key) | sfepy.discrete.fem.geometry_element.GeometryElement |
# c: 21.09.2008
import os
import numpy as nm
from sfepy import data_dir
from sfepy.fem import MeshIO
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
## filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/cube_cylinder.mesh'
omega = 1
omega_squared = omega**2
conf_dir = os.path.dirname(__file__)
io = | MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir) | sfepy.fem.MeshIO.any_from_filename |
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
    """
    Define a mesh programmatically.

    In 'read' mode, fill `mesh` with a unit square split into two
    triangles; any other mode (including 'write') is a no-op.
    """
    if mode != 'read':
        return

    vertices = [[0, 0], [1, 0], [1, 1], [0, 1]]
    vertex_groups = [0, 0, 1, 1]
    connectivities = [[[0, 1, 2], [0, 2, 3]]]
    material_ids = [[0, 1]]
    cell_descs = ['2_3']

    mesh._set_data(vertices, vertex_groups, connectivities,
                   material_ids, cell_descs)
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, | UserMeshIO(mesh_hook) | sfepy.fem.meshio.UserMeshIO |
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_data(nodes, nod_ids, conns, mat_ids, descs)
## mesh.write('aux.vtk', io='auto')
elif mode == 'write':
pass
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.testing import TestCommon, assert_
##
# c: 05.02.2008
class Test( TestCommon ):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes', 'test_read_dimension']
##
# c: 05.02.2008, r: 05.02.2008
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 05.02.2008, r: 05.02.2008
def test_read_meshes( self ):
"""Try to read all listed meshes."""
from sfepy.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate( filename_meshes ):
self.report( '%d. mesh: %s' % (ii + 1, filename) )
mesh = | Mesh.from_file(filename, prefix_dir=conf_dir) | sfepy.fem.Mesh.from_file |
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_data(nodes, nod_ids, conns, mat_ids, descs)
## mesh.write('aux.vtk', io='auto')
elif mode == 'write':
pass
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.testing import TestCommon, assert_
##
# c: 05.02.2008
class Test( TestCommon ):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes', 'test_read_dimension']
##
# c: 05.02.2008, r: 05.02.2008
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 05.02.2008, r: 05.02.2008
def test_read_meshes( self ):
    """Try to read all listed meshes."""
    from sfepy.fem import Mesh

    # prefix_dir resolves the mesh paths relative to this test file.
    conf_dir = op.dirname(__file__)
    meshes = {}
    for ii, filename in enumerate( filename_meshes ):
        self.report( '%d. mesh: %s' % (ii + 1, filename) )
        mesh = Mesh.from_file(filename, prefix_dir=conf_dir)

        # Basic consistency of the read mesh data.
        assert_(mesh.dim == (mesh.coors.shape[1]))
        assert_(mesh.n_nod == (mesh.coors.shape[0]))
        assert_(mesh.n_nod == (mesh.ngroups.shape[0]))
        assert_(mesh.n_el == sum(mesh.n_els))
        # Per-group consistency: one material id per cell, declared cell
        # and node-per-cell counts match the connectivity shape.
        for ig, conn in enumerate( mesh.conns ):
            assert_(conn.shape[0] == len(mesh.mat_ids[ig]))
            assert_(conn.shape[0] == mesh.n_els[ig])
            assert_(conn.shape[1] == mesh.n_e_ps[ig])
        self.report( 'read ok' )
        meshes[filename] = mesh

    # Stash the meshes for test_compare_same_meshes(), which the `tests`
    # list orders after this test.
    self.meshes = meshes

    return True
##
# c: 05.02.2008, r: 05.02.2008
def test_compare_same_meshes( self ):
    """
    Compare same meshes in various formats.

    The index pairs in the module-level `same` list denote entries of
    `filename_meshes` that should describe identical meshes; the meshes
    themselves were read and stored by test_read_meshes().
    """
    import numpy as nm

    oks = []
    for i0, i1 in same:
        name0 = filename_meshes[i0]
        name1 = filename_meshes[i1]
        self.report( 'comparing meshes from "%s" and "%s"' % (name0, name1) )
        mesh0 = self.meshes[name0]
        mesh1 = self.meshes[name1]

        # Scalar attributes.
        ok0 = (mesh0.dim == mesh1.dim)
        if not ok0:
            self.report( 'dimension failed!' )
        oks.append( ok0 )

        ok0 = mesh0.n_nod == mesh1.n_nod
        if not ok0:
            self.report( 'number of nodes failed!' )
        oks.append( ok0 )

        ok0 = mesh0.n_el == mesh1.n_el
        if not ok0:
            self.report( 'number of elements failed!' )
        oks.append( ok0 )

        ok0 = mesh0.n_e_ps == mesh1.n_e_ps
        if not ok0:
            self.report( 'number of element points failed!' )
        oks.append( ok0 )

        ok0 = mesh0.descs == mesh1.descs
        if not ok0:
            self.report( 'element types failed!' )
        oks.append( ok0 )

        # Array data: coordinates with a tolerance, integer arrays
        # exactly.
        ok0 = nm.allclose( mesh0.coors, mesh1.coors )
        if not ok0:
            self.report( 'nodes failed!' )
        oks.append( ok0 )

        ok0 = nm.all( mesh0.ngroups == mesh1.ngroups )
        if not ok0:
            self.report( 'node groups failed!' )
        oks.append( ok0 )

        for ii in range( len( mesh0.mat_ids ) ):
            ok0 = nm.all( mesh0.mat_ids[ii] == mesh1.mat_ids[ii] )
            if not ok0:
                self.report( 'material ids failed!' )
            oks.append( ok0 )

        # Fixed: iterate over the connectivity groups themselves, not
        # over mat_ids - the counts agree for valid meshes, but the
        # original bound silently skipped extra connectivity groups.
        for ii in range( len( mesh0.conns ) ):
            ok0 = nm.all( mesh0.conns[ii] == mesh1.conns[ii] )
            if not ok0:
                self.report( 'connectivities failed!' )
            oks.append( ok0 )

    # True only if every single check passed.
    return sum( oks ) == len( oks )
##
# c: 03.07.2008, r: 03.07.2008
def test_read_dimension( self ):
from sfepy.fem import MeshIO
meshes = {data_dir + '/meshes/various_formats/small2d.mesh' : 2,
data_dir + '/meshes/various_formats/small2d.vtk' : 2,
data_dir + '/meshes/various_formats/small3d.mesh' : 3}
ok = True
conf_dir = op.dirname(__file__)
for filename, adim in meshes.iteritems():
self.report( 'mesh: %s, dimension %d' % (filename, adim) )
io = | MeshIO.any_from_filename(filename, prefix_dir=conf_dir) | sfepy.fem.MeshIO.any_from_filename |
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_data(nodes, nod_ids, conns, mat_ids, descs)
## mesh.write('aux.vtk', io='auto')
elif mode == 'write':
pass
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.testing import TestCommon, assert_
##
# c: 05.02.2008
class Test( TestCommon ):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes', 'test_read_dimension']
##
# c: 05.02.2008, r: 05.02.2008
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 05.02.2008, r: 05.02.2008
def test_read_meshes( self ):
"""Try to read all listed meshes."""
from sfepy.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate( filename_meshes ):
self.report( '%d. mesh: %s' % (ii + 1, filename) )
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.ngroups.shape[0]))
assert_(mesh.n_el == sum(mesh.n_els))
for ig, conn in enumerate( mesh.conns ):
assert_(conn.shape[0] == len(mesh.mat_ids[ig]))
| assert_(conn.shape[0] == mesh.n_els[ig]) | sfepy.base.testing.assert_ |
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cylinder.mesh',
'/meshes/3d/cylinder.vtk',
'/meshes/various_formats/small2d.mesh',
'/meshes/various_formats/small2d.vtk',
'/meshes/various_formats/octahedron.node',
'/meshes/various_formats/comsol_tri.txt',
'/meshes/various_formats/abaqus_hex.inp',
'/meshes/various_formats/abaqus_tet.inp',
'/meshes/various_formats/abaqus_quad.inp',
'/meshes/various_formats/abaqus_tri.inp',
'/meshes/various_formats/abaqus_quad_tri.inp',
'/meshes/various_formats/hex4.mesh3d',
'/meshes/various_formats/tetra8.mesh3d',
'/meshes/various_formats/cube.bdf',
'/meshes/various_formats/med_2d_tri_quad.med',
'/meshes/various_formats/med_3d_tet_hex.med']
filename_meshes = [data_dir + name for name in filename_meshes]
def mesh_hook(mesh, mode):
"""
Define a mesh programmatically.
"""
if mode == 'read':
nodes = [[0, 0], [1, 0], [1, 1], [0, 1]]
nod_ids = [0, 0, 1, 1]
conns = [[[0, 1, 2], [0, 2, 3]]]
mat_ids = [[0, 1]]
descs = ['2_3']
mesh._set_data(nodes, nod_ids, conns, mat_ids, descs)
## mesh.write('aux.vtk', io='auto')
elif mode == 'write':
pass
from sfepy.fem.meshio import UserMeshIO
filename_meshes.extend([mesh_hook, UserMeshIO(mesh_hook)])
same = [(0, 1), (2, 3)]
import os.path as op
from sfepy.base.testing import TestCommon, assert_
##
# c: 05.02.2008
class Test( TestCommon ):
"""Write test names explicitely to impose a given order of evaluation."""
tests = ['test_read_meshes', 'test_compare_same_meshes', 'test_read_dimension']
##
# c: 05.02.2008, r: 05.02.2008
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 05.02.2008, r: 05.02.2008
def test_read_meshes( self ):
"""Try to read all listed meshes."""
from sfepy.fem import Mesh
conf_dir = op.dirname(__file__)
meshes = {}
for ii, filename in enumerate( filename_meshes ):
self.report( '%d. mesh: %s' % (ii + 1, filename) )
mesh = Mesh.from_file(filename, prefix_dir=conf_dir)
assert_(mesh.dim == (mesh.coors.shape[1]))
assert_(mesh.n_nod == (mesh.coors.shape[0]))
assert_(mesh.n_nod == (mesh.ngroups.shape[0]))
assert_(mesh.n_el == sum(mesh.n_els))
for ig, conn in enumerate( mesh.conns ):
assert_(conn.shape[0] == len(mesh.mat_ids[ig]))
assert_(conn.shape[0] == mesh.n_els[ig])
| assert_(conn.shape[1] == mesh.n_e_ps[ig]) | sfepy.base.testing.assert_ |
# 10.07.2007, c
# last revision: 25.03.2008
from sfepy import data_dir
filename_meshes = ['/meshes/3d/cube_medium_tetra.mesh',
'/meshes/3d/cube_medium_tetra.mesh',
'/meshes/3d/cube_medium_hexa.mesh']
filename_meshes = [data_dir + name for name in filename_meshes]
all_your_bases = [1, 2, 1]
filename_mesh = None
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : None,
}
def get_pars( dim, full = False ):
    """
    Return the Lame parameters, or the full isotropic stiffness matrix.

    Parameters
    ----------
    dim : int
        The space dimension.
    full : bool
        If True, return the (sym, sym) stiffness matrix built from the
        Lame parameters in the symmetric-tensor notation; otherwise
        return the pair (lam, mu).
    """
    import numpy as nm

    # Number of independent components of a symmetric second order
    # tensor. (dim + 1) * dim is always even, so // is exact; it also
    # keeps `sym` an int under Python 3 - the original plain / produced
    # a float, breaking the `[0.] * (sym - dim)` list repetition below.
    sym = (dim + 1) * dim // 2

    lam = 1e1
    mu = 1e0
    o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
    oot = nm.outer( o, o )
    if full:
        return lam * oot + mu * nm.diag( o + 1.0 )
    else:
        return lam, mu
material_1 = {
'name' : 'solid',
'values' : {
'lam' : get_pars( 3 )[0],
'mu' : get_pars( 3 )[1],
'Dijkl' : get_pars( 3, True ),
}
}
material_2 = {
'name' : 'spring',
'values' : {
'.pars' : {'stiffness' : 1e0, 'projection' : None},
}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Bottom',
'select' : 'vertices in (z < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Top',
'select' : 'vertices in (z > 0.499)',
'kind' : 'facet',
}
ebc_1 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.2' : 0.1},
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations_iso = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i.Omega( solid.lam, solid.mu, v, u )
= dw_point_lspring.i.Bottom( spring.pars, v, u )""",
}
equations_general = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega( solid.Dijkl, v, u )
= dw_point_lspring.i.Bottom( spring.pars, v, u )""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from sfepy.base.testing import TestCommon
##
# 10.07.2007, c
class Test( TestCommon ):
tests = ['test_get_solution', 'test_linear_terms']
##
# 10.07.2007, c
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 25.03.2008, r: 25.03.2008
def test_linear_terms( self ):
    """
    Compare the solutions of the isotropic and the general linear
    elastic formulations stored by test_get_solution(); they must
    coincide.
    """
    ok = True
    for sols in self.solutions:
        # Short-circuit: once a comparison failed, skip further calls
        # and keep the failing result.
        if not ok:
            break
        ok = self.compare_vectors( sols[0], sols[1],
                                   label1 = 'isotropic',
                                   label2 = 'general' )
    return ok
##
# c: 10.07.2007, r: 25.03.2008
def test_get_solution( self ):
from sfepy.applications import solve_pde
from sfepy.base.base import IndexedStruct
import os.path as op
ok = True
self.solutions = []
for ii, approx_order in enumerate(all_your_bases):
fname = filename_meshes[ii]
self.conf.filename_mesh = fname
fields = {'field_1' : {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : approx_order,
}
}
self.conf.edit('fields', fields)
self.report( 'mesh: %s, base: %s' % (fname, approx_order) )
status = | IndexedStruct() | sfepy.base.base.IndexedStruct |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import find_subclasses, assert_, Struct
from sfepy.linalg import combine, insert_strided_axis
from six.moves import range
from functools import reduce
# Requires fixed vertex numbering!
vertex_maps = {3 : [[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]],
2 : [[0, 0],
[1, 0],
[1, 1],
[0, 1]],
1 : [[0],
[1]]}
def transform_basis(transform, bf):
    """
    Transform a basis `bf` using `transform` array of matrices.

    The last axis of `bf` is contracted with the last axis of the
    per-cell matrices in `transform`, producing a per-cell basis with
    the cell axis first.
    """
    subscripts = 'cij,qdj->cqdi' if bf.ndim == 3 else 'cij,oqdj->cqdi'
    return nm.einsum(subscripts, transform, bf)
class LagrangeNodes(Struct):
    """Helper class for defining nodes of Lagrange elements.

    Each append_*() method generates the interior nodes of one facet
    kind. A node is stored in the preallocated `nodes` array as integer
    weights of the element vertices, and `nts[i]` records
    ``[node_type, facet_index]`` for node `i`. All methods take the
    current sequential index `iseq` and return its updated value.
    """
    @staticmethod
    def append_edges(nodes, nts, iseq, nt, edges, order):
        # Interior nodes of each edge: positive integer weights (c1, c2)
        # of the two edge vertices with c1 + c2 == order.
        delta = 1.0 / float(order)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(order - 1):
                c2 = ie + 1
                c1 = order - c2
                nts[iseq] = [nt, ii]
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq

    @staticmethod
    def append_faces(nodes, nts, iseq, nt, faces, order):
        # Interior nodes of each triangular face: positive weights
        # (c1, c2, c3) of the first three face vertices summing to the
        # order.
        delta = 1.0 / float(order)
        for ii, face in enumerate(faces):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            for i1 in range(order - 2):
                for i2 in range(order - 2 - i1):
                    c3 = i1 + 1
                    c2 = i2 + 1
                    c1 = order - c3 - c2
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_bubbles(nodes, nts, iseq, nt, order):
        # Interior (bubble) nodes of a simplex cell with four vertices:
        # positive weights of all vertices summing to the order. The
        # facet index is always 0 - there is a single cell interior.
        delta = 1.0 / float(order)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        for i1 in range(order - 3):
            for i2 in range(order - 3):
                for i3 in range(order - 3 - i1 - i2):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = i3 + 1
                    c1 = order - c4 - c3 - c2
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3 + c4 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_tp_edges(nodes, nts, iseq, nt, edges, ao):
        # Tensor-product analogue of append_edges(); `ao` is the
        # approximation order along each axis.
        delta = 1.0 / float(ao)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(ao - 1):
                c2 = ie + 1
                c1 = ao - c2
                nts[iseq] = [nt, ii]
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq

    @staticmethod
    def append_tp_faces(nodes, nts, iseq, nt, faces, ao):
        # Interior nodes of each quadrilateral face: bilinear integer
        # combination of its four corners, normalized by ao**2.
        delta = 1.0 / (float(ao) ** 2)
        for ii, face in enumerate( faces ):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            n4 = nodes[face[3],:].copy()
            for i1 in range(ao - 1):
                for i2 in range(ao - 1):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = ao - c4
                    c1 = ao - c3
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * n1 + c2 * c3 * n2
                                       + c3 * c4 * n3 + c4 * c1 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_tp_bubbles(nodes, nts, iseq, nt, ao):
        # Interior (bubble) nodes of a hexahedron: trilinear integer
        # combination of its eight corners, normalized by ao**3.
        delta = 1.0 / (float(ao) ** 3)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        n5 = nodes[4,:].copy()
        n6 = nodes[5,:].copy()
        n7 = nodes[6,:].copy()
        n8 = nodes[7,:].copy()
        for i1 in range(ao - 1):
            for i2 in range(ao - 1):
                for i3 in range(ao - 1):
                    c6 = i1 + 1
                    c5 = i2 + 1
                    c4 = i3 + 1
                    c3 = ao - c6
                    c2 = ao - c5
                    c1 = ao - c4
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * c3 * n1 + c4 * c2 * c3 * n2
                                       + c5 * c4 * c3 * n3 + c1 * c3 * c5 * n4
                                       + c1 * c2 * c6 * n5 + c4 * c2 * c6 * n6
                                       + c5 * c4 * c6 * n7 + c1 * c6 * c5 * n8)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
class NodeDescription(Struct):
    """
    Describe FE nodes defined on different parts of a reference element.

    The `node_types` array has rows ``[node_type, facet_index]`` with
    node types 0 (vertex), 1 (edge), 2 (face) and 3 (bubble). The
    per-kind attributes (`vertex`, `edge`, `face`, `bubble`) hold the
    corresponding node indices, `n_*_nod` the per-facet node counts and
    `*_nodes` the node definitions themselves.
    """
    def _describe_facets(self, ii):
        # Split the node indices `ii` into per-facet groups: a new group
        # starts wherever the facet index column increases.
        nts = self.node_types[ii]
        ik = nm.where(nts[1:,1] > nts[:-1,1])[0]
        if len(ik) == 0:
            ifacets = None
            n_dof = 0

        else:
            ii = ii.astype(nm.int32)

            # Group boundaries: start, each increase, end.
            ik = nm.r_[0, ik + 1, nts.shape[0]]
            ifacets = [ii[ik[ir] : ik[ir+1]] for ir in range(len(ik) - 1)]
            # DOFs per facet, taken from the first group.
            n_dof = len(ifacets[0])

        return ifacets, n_dof

    def _describe_other(self, ii):
        # Vertex and bubble nodes form a single flat group.
        if len(ii):
            return ii, len(ii)

        else:
            return None, 0

    def _get_facet_nodes(self, ifacets, nodes):
        if ifacets is None:
            return None

        else:
            return [nodes[ii] for ii in ifacets]

    def _get_nodes(self, ii, nodes):
        if ii is None:
            return None

        else:
            return nodes[ii]

    def __init__(self, node_types, nodes):
        self.node_types = node_types

        # Vertex nodes.
        ii = nm.where(node_types[:,0] == 0)[0]
        self.vertex, self.n_vertex_nod = self._describe_other(ii)
        self.vertex_nodes = self._get_nodes(self.vertex, nodes)

        # Edge nodes.
        ii = nm.where(node_types[:,0] == 1)[0]
        self.edge, self.n_edge_nod = self._describe_facets(ii)
        self.edge_nodes = self._get_facet_nodes(self.edge, nodes)

        # Face nodes.
        ii = nm.where(node_types[:,0] == 2)[0]
        self.face, self.n_face_nod = self._describe_facets(ii)
        self.face_nodes = self._get_facet_nodes(self.face, nodes)

        # Bubble nodes.
        ii = nm.where(node_types[:,0] == 3)[0]
        self.bubble, self.n_bubble_nod = self._describe_other(ii)
        self.bubble_nodes = self._get_nodes(self.bubble, nodes)

    def has_extra_nodes(self):
        """
        Return True if the element has some edge, face or bubble nodes.
        """
        return (self.n_edge_nod + self.n_face_nod + self.n_bubble_nod) > 0
class PolySpace(Struct):
    """Abstract polynomial space class."""
    # Cache of all PolySpace subclasses, filled lazily by any_from_args().
    _all = None

    # Map (dimension, number of vertices) of the reference element to the
    # polynomial space family.
    keys = {
        (1, 2) : 'simplex',
        (2, 3) : 'simplex',
        (3, 4) : 'simplex',
        (2, 4) : 'tensor_product',
        (3, 8) : 'tensor_product',
    }

    @staticmethod
    def any_from_args(name, geometry, order, base='lagrange',
                      force_bubble=False):
        """
        Construct a particular polynomial space classes according to the
        arguments passed in.
        """
        if name is None:
            name = PolySpace.suggest_name(geometry, order, base, force_bubble)

        # Discover the subclasses once and cache the table.
        if PolySpace._all is None:
            PolySpace._all = find_subclasses(globals(), [PolySpace])
        table = PolySpace._all

        key = '%s_%s' % (base, PolySpace.keys[(geometry.dim,
                                              geometry.n_vertex)])
        # A 1D line element may fall back to the tensor-product space
        # when no simplex variant is registered for the given base.
        if (geometry.name == '1_2') and (key not in table):
            key = '%s_%s' % (base, 'tensor_product')

        if force_bubble:
            key += '_bubble'

        return table[key](name, geometry, order)

    @staticmethod
    def suggest_name(geometry, order, base='lagrange',
                     force_bubble=False):
        """
        Suggest the polynomial space name given its constructor parameters.
        """
        aux = geometry.get_interpolation_name()[:-1]
        if force_bubble:
            return aux + ('%dB' % order)
        else:
            return aux + ('%d' % order)

    def __init__(self, name, geometry, order):
        self.name = name
        self.geometry = geometry
        self.order = order

        # Bounding box of the reference element coordinates.
        self.bbox = nm.vstack((geometry.coors.min(0), geometry.coors.max(0)))

    def eval_base(self, coors, diff=0, ori=None, force_axis=False,
                  transform=None, suppress_errors=False, eps=1e-15):
        """
        Evaluate the basis or its first or second derivatives in points given
        by coordinates. The real work is done in _eval_base() implemented in
        subclasses.

        Note that the second derivative code is a work-in-progress and only
        `coors` and `transform` arguments are used.

        Parameters
        ----------
        coors : array_like
            The coordinates of points where the basis is evaluated. See Notes.
        diff : 0, 1 or 2
            If nonzero, return the given derivative.
        ori : array_like, optional
            Optional orientation of element facets for per element basis.
        force_axis : bool
            If True, force the resulting array shape to have one more axis even
            when `ori` is None.
        transform : array_like, optional
            The basis transform array.
        suppress_errors : bool
            If True, do not report points outside the reference domain.
        eps : float
            Accuracy for comparing coordinates.

        Returns
        -------
        base : array
            The basis (shape (n_coor, 1, n_base)) or its first derivative
            (shape (n_coor, dim, n_base)) or its second derivative (shape
            (n_coor, dim, dim, n_base)) evaluated in the given points. An
            additional axis is pre-pended of length n_cell, if `ori` is given,
            or of length 1, if `force_axis` is True.

        Notes
        -----
        If coors.ndim == 3, several point sets are assumed, with equal number
        of points in each of them. This is the case, for example, of the
        values of the volume base functions on the element facets. The indexing
        (of bf_b(g)) is then (ifa,iqp,:,n_ep), so that the facet can be set in
        C using FMF_SetCell.
        """
        coors = nm.asarray(coors)
        if not coors.ndim in (2, 3):
            raise ValueError('coordinates must have 2 or 3 dimensions! (%d)'
                             % coors.ndim)

        if (coors.ndim == 2):
            # Single point set.
            base = self._eval_base(coors, diff=diff, ori=ori,
                                   suppress_errors=suppress_errors,
                                   eps=eps)

            if (base.ndim == 3) and force_axis:
                base = base[None, ...]

            if not base.flags['C_CONTIGUOUS']:
                base = nm.ascontiguousarray(base)

        else: # Several point sets.
            if diff:
                bdim = self.geometry.dim
            else:
                bdim = 1

            # Evaluate each point set separately into a preallocated
            # array with the set index first.
            base = nm.empty((coors.shape[0], coors.shape[1],
                             bdim, self.n_nod), dtype=nm.float64)

            for ii, _coors in enumerate(coors):
                base[ii] = self._eval_base(_coors, diff=diff, ori=ori,
                                           suppress_errors=suppress_errors,
                                           eps=eps)

        if transform is not None:
            base = transform_basis(transform, base)

        return base

    def get_mtx_i(self):
        # The interpolation matrix inverse; set up by subclasses.
        return self.mtx_i

    def describe_nodes(self):
        """Return a NodeDescription of this space's nodes."""
        return NodeDescription(self.nts, self.nodes)
class LagrangePolySpace(PolySpace):
    """
    Common functionality of Lagrange polynomial spaces: basis values and
    first derivatives are evaluated by the C code via CLagrangeContext.
    """

    def create_context(self, cmesh, eps, check_errors, i_max, newton_eps,
                       tdim=None):
        """
        Create the C evaluation context for this space.

        Parameters
        ----------
        cmesh : mesh object or None
            If given, its coordinates and cell-vertex connectivity are
            passed to the context; otherwise `tdim` must be supplied and
            the context is mesh-free.
        eps, check_errors, i_max, newton_eps
            Tolerances and iteration controls forwarded to the context
            (presumably for the Newton loop of the inverse reference
            mapping - TODO confirm against extmods.bases).
        tdim : int, optional
            The topological dimension; defaults to `cmesh.tdim`.

        Raises
        ------
        ValueError
            If neither `cmesh` nor `tdim` is given.
        """
        from sfepy.discrete.fem.extmods.bases import CLagrangeContext

        ref_coors = self.geometry.coors

        if cmesh is not None:
            mesh_coors = cmesh.coors

            # Cell-to-vertex connectivity as int32, one row per cell.
            conn = cmesh.get_conn(cmesh.tdim, 0)
            mesh_conn = conn.indices.reshape(cmesh.n_el, -1).astype(nm.int32)

            if tdim is None:
                tdim = cmesh.tdim

        else:
            mesh_coors = mesh_conn = None

        if tdim is None:
            raise ValueError('supply either cmesh or tdim!')

        ctx = CLagrangeContext(order=self.order,
                               tdim=tdim,
                               nodes=self.nodes,
                               ref_coors=ref_coors,
                               mesh_coors=mesh_coors,
                               mesh_conn=mesh_conn,
                               mtx_i=self.get_mtx_i(),
                               eps=eps,
                               check_errors=check_errors,
                               i_max=i_max,
                               newton_eps=newton_eps)

        return ctx

    def _eval_base(self, coors, diff=0, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See :func:`PolySpace.eval_base()`.
        """
        if diff == 2:
            # Second derivatives are computed in Python by the subclass;
            # values and first derivatives go through the C context.
            base = self._eval_hessian(coors)

        else:
            base = self.eval_ctx.evaluate(coors, diff=diff,
                                          eps=eps,
                                          check_errors=not suppress_errors)

        return base
class LagrangeSimplexPolySpace(LagrangePolySpace):
"""Lagrange polynomial space on a simplex domain."""
name = 'lagrange_simplex'
def __init__(self, name, geometry, order, init_context=True):
    """
    Set up the simplex Lagrange space: the vertex-coordinate matrix
    inverse `mtx_i` (used to obtain barycentric-like coordinates - see
    get_bc() in _eval_hessian()), the node definitions and their
    coordinates.

    When `init_context` is True, a mesh-free C evaluation context is
    created immediately for the reference element.
    """
    PolySpace.__init__(self, name, geometry, order)

    n_v = geometry.n_vertex

    # Matrix of vertex coordinates augmented with a row of ones; its
    # inverse maps [coor, 1] to the vertex weights.
    mtx = nm.ones((n_v, n_v), nm.float64)
    mtx[0:n_v-1,:] = nm.transpose(geometry.coors)
    self.mtx_i = nm.ascontiguousarray(nla.inv(mtx))

    self.rhs = nm.ones((n_v,), nm.float64)

    self.nodes, self.nts, node_coors = self._define_nodes()
    self.node_coors = nm.ascontiguousarray(node_coors)
    self.n_nod = self.nodes.shape[0]

    if init_context:
        self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                            tdim=n_v - 1)

    else:
        self.eval_ctx = None
def _define_nodes(self):
# Factorial.
fac = lambda n : reduce(lambda a, b : a * (b + 1), range(n), 1)
geometry = self.geometry
n_v, dim = geometry.n_vertex, geometry.dim
order = self.order
n_nod = fac(order + dim) // (fac(order) * fac(dim))
## print n_nod, gd
nodes = nm.zeros((n_nod, n_v), nm.int32)
nts = nm.zeros((n_nod, 2), nm.int32)
if order == 0:
nts[0,:] = [3, 0]
nodes[0,:] = nm.zeros((n_v,), nm.int32)
else:
iseq = 0
# Vertex nodes.
nts[0:n_v,0] = 0
nts[0:n_v,1] = nm.arange(n_v, dtype = nm.int32)
aux = order * nm.identity(n_v, dtype = nm.int32)
nodes[iseq:iseq+n_v,:] = aux
iseq += n_v
if dim == 1:
iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 3,
[[0, 1]], order)
elif dim == 2:
iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
geometry.edges, order)
iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 3,
[[0, 1, 2]], order)
elif dim == 3:
iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
geometry.edges, order)
iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 2,
geometry.faces, order)
iseq = LagrangeNodes.append_bubbles(nodes, nts, iseq, 3,
order)
else:
raise NotImplementedError
## print nm.concatenate((nts, nodes), 1)
# Check orders.
orders = nm.sum(nodes, 1)
if not nm.all(orders == order):
raise AssertionError('wrong orders! (%d == all of %s)'
% (order, orders))
# Coordinates of the nodes.
if order == 0:
tmp = nm.ones((n_nod, n_v), nm.int32)
node_coors = nm.dot(tmp, geometry.coors) / n_v
else:
node_coors = nm.dot(nodes, geometry.coors) / order
return nodes, nts, node_coors
def _eval_hessian(self, coors):
"""
Evaluate the second derivatives of the basis.
"""
def get_bc(coor):
rhs = nm.concatenate((coor, [1]))
bc = nm.dot(self.mtx_i, rhs)
return bc
def get_val(bc, node, omit=[]):
val = nm.ones(1, nm.float64)
for i1 in range(bc.shape[0]):
if i1 in omit: continue
for i2 in range(node[i1]):
val *= (self.order * bc[i1] - i2) / (i2 + 1.0)
return val
def get_der(bc1, node1, omit=[]):
val = nm.zeros(1, nm.float64)
for i1 in range(node1):
if i1 in omit: continue
aux = nm.ones(1, nm.float64)
for i2 in range(node1):
if (i1 == i2) or (i2 in omit): continue
aux *= (self.order * bc1 - i2) / (i2 + 1.0)
val += aux * self.order / (i1 + 1.0)
return val
n_v = self.mtx_i.shape[0]
dim = n_v - 1
mi = self.mtx_i[:, :dim]
bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
dtype=nm.float64)
for ic, coor in enumerate(coors):
bc = get_bc(coor)
for ii, node in enumerate(self.nodes):
for ig1, bc1 in enumerate(bc): # 1. derivative w.r.t. bc1.
for ig2, bc2 in enumerate(bc): # 2. derivative w.r.t. bc2.
if ig1 == ig2:
val = get_val(bc, node, omit=[ig1])
vv = 0.0
for i1 in range(node[ig1]):
aux = get_der(bc2, node[ig2], omit=[i1])
vv += aux * self.order / (i1 + 1.0)
val *= vv
else:
val = get_val(bc, node, omit=[ig1, ig2])
val *= get_der(bc1, node[ig1])
val *= get_der(bc2, node[ig2])
bfgg[ic, :, :, ii] += val * mi[ig1] * mi[ig2][:, None]
return bfgg
class LagrangeSimplexBPolySpace(LagrangeSimplexPolySpace):
    """Lagrange polynomial space with forced bubble function on a simplex
    domain."""
    name = 'lagrange_simplex_bubble'

    def __init__(self, name, geometry, order, init_context=True):
        LagrangeSimplexPolySpace.__init__(self, name, geometry, order,
                                          init_context=False)

        nodes, nts, node_coors = self.nodes, self.nts, self.node_coors

        # Append one node type row for the extra bubble node.
        nts = nm.resize(nts, [nts.shape[0] + 1, 2])
        nts[-1, :] = [3, 0]

        nodes = nm.resize(nodes, [nodes.shape[0] + 1, nodes.shape[1]])
        # Make a 'hypercubic' (cubic in 2D) node.
        nodes[-1, :] = 1

        # Place the bubble node at the vertex average (barycentre).
        n_v = self.geometry.n_vertex
        ones = nm.ones((n_v,), nm.int32)
        centre = nm.dot(ones, self.geometry.coors) / n_v
        node_coors = nm.vstack((node_coors, centre))

        self.nodes, self.nts = nodes, nts
        self.node_coors = nm.ascontiguousarray(node_coors)

        self.bnode = nodes[-1:, :]

        self.n_nod = self.nodes.shape[0]

        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)

        else:
            self.eval_ctx = None

    def create_context(self, *args, **kwargs):
        """
        Create the C evaluation context with the bubble flag set.
        """
        ctx = LagrangePolySpace.create_context(self, *args, **kwargs)
        ctx.is_bubble = 1

        return ctx
class LagrangeTensorProductPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a tensor product (hypercube) domain."""
    name = 'lagrange_tensor_product'

    def __init__(self, name, geometry, order, init_context=True):
        """
        Parameters
        ----------
        name : str
            The name of the space.
        geometry : Struct
            The reference element geometry.
        order : int
            The polynomial order.
        init_context : bool
            If True, create the C evaluation context.
        """
        PolySpace.__init__(self, name, geometry, order)

        # Auxiliary 1D space - each tensor product shape function is a
        # product of 1D Lagrange shape functions, one per space dimension.
        g1d = Struct(n_vertex=2,
                     dim=1,
                     coors=self.bbox[:, 0:1].copy())
        self.ps1d = LagrangeSimplexPolySpace('P_aux', g1d, order,
                                             init_context=False)

        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        if init_context:
            # NOTE(review): int(sqrt(n_vertex)) yields 2 also for 3D
            # hexahedra (sqrt(8) ~ 2.83) - confirm this is the intended
            # topological dimension of the C context.
            tdim = int(nm.sqrt(geometry.n_vertex))
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=tdim)

        else:
            self.eval_ctx = None

    def _define_nodes(self):
        """
        Define the tensor product Lagrange nodes.

        Returns
        -------
        nodes : array
            For each node, two integer exponents per space dimension (each
            pair sums to `order`).
        nts : array
            The node types (type, type index), type being 0 (vertex),
            1 (edge), 2 (face) or 3 (bubble).
        node_coors : array
            The node coordinates.
        """
        geometry = self.geometry
        order = self.order

        n_v, dim = geometry.n_vertex, geometry.dim

        vertex_map = order * nm.array(vertex_maps[dim], dtype=nm.int32)

        n_nod = (order + 1) ** dim
        nodes = nm.zeros((n_nod, 2 * dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        if order == 0:
            # A single bubble node for the constant basis.
            nts[0, :] = [3, 0]
            nodes[0, :] = 0

        else:
            iseq = 0

            # Vertex nodes. (An unused identity-matrix expression that
            # discarded its result was removed from this branch.)
            nts[0:n_v, 0] = 0
            nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
            if dim == 3:
                for ii in range(n_v):
                    i1, i2, i3 = vertex_map[ii]
                    nodes[iseq, :] = [order - i1, i1,
                                      order - i2, i2,
                                      order - i3, i3]
                    iseq += 1

            elif dim == 2:
                for ii in range(n_v):
                    i1, i2 = vertex_map[ii]
                    nodes[iseq, :] = [order - i1, i1, order - i2, i2]
                    iseq += 1

            else:
                for ii in range(n_v):
                    i1 = vertex_map[ii][0]
                    nodes[iseq, :] = [order - i1, i1]
                    iseq += 1

            # Edge, face and bubble nodes, depending on the dimension.
            if dim == 1:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 3,
                                                     [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 3,
                                                     [[0, 1, 2, 3]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 2,
                                                     geometry.faces, order)
                iseq = LagrangeNodes.append_tp_bubbles(nodes, nts, iseq, 3,
                                                       order)
            else:
                raise NotImplementedError

        # Check orders: every row of exponents must sum to dim * order.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order * dim):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order * dim, orders))

        # Coordinates of the nodes.
        if order == 0:
            # The single node is placed at the vertex average (barycentre).
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v

        else:
            # Blend the 1D bounding box ends using the exponent pairs.
            c_min, c_max = self.bbox[:, 0]

            cr = nm.arange(2 * dim)
            node_coors = (nodes[:, cr[::2]] * c_min
                          + nodes[:, cr[1::2]] * c_max) / order

        return nodes, nts, node_coors

    def _eval_base_debug(self, coors, diff=False, ori=None,
                         suppress_errors=False, eps=1e-15):
        """Python version of eval_base()."""
        dim = self.geometry.dim

        ev = self.ps1d.eval_base

        if diff:
            base = nm.ones((coors.shape[0], dim, self.n_nod), dtype=nm.float64)

            for ii in range(dim):
                # Evaluate the ii-th 1D factor of all shape functions.
                self.ps1d.nodes = self.nodes[:, 2 * ii : 2 * ii + 2].copy()
                self.ps1d.n_nod = self.n_nod

                for iv in range(dim):
                    # Differentiate only the factor that matches the
                    # gradient component.
                    if ii == iv:
                        base[:, iv:iv+1, :] *= ev(coors[:, ii:ii+1].copy(),
                                                  diff=True,
                                                  suppress_errors=suppress_errors,
                                                  eps=eps)

                    else:
                        base[:, iv:iv+1, :] *= ev(coors[:, ii:ii+1].copy(),
                                                  diff=False,
                                                  suppress_errors=suppress_errors,
                                                  eps=eps)

        else:
            base = nm.ones((coors.shape[0], 1, self.n_nod), dtype=nm.float64)

            for ii in range(dim):
                self.ps1d.nodes = self.nodes[:, 2 * ii : 2 * ii + 2].copy()
                self.ps1d.n_nod = self.n_nod

                base *= ev(coors[:, ii:ii+1].copy(),
                           diff=diff,
                           suppress_errors=suppress_errors,
                           eps=eps)

        return base

    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        evh = self.ps1d.eval_base

        dim = self.geometry.dim

        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)

        # Values, first and second derivatives of the 1D factors.
        v0s = []
        v1s = []
        v2s = []
        for ii in range(dim):
            self.ps1d.nodes = self.nodes[:, 2 * ii : 2 * ii + 2].copy()
            self.ps1d.n_nod = self.n_nod
            ev = self.ps1d.create_context(None, 0, 1e-15, 100, 1e-8,
                                          tdim=1).evaluate

            v0s.append(ev(coors[:, ii:ii+1].copy())[:, 0, :])
            v1s.append(ev(coors[:, ii:ii+1].copy(), diff=1)[:, 0, :])
            v2s.append(evh(coors[:, ii:ii+1], diff=2)[:, 0, 0, :])

        for ir in range(dim):
            vv = v2s[ir] # Destroys v2s!
            for ik in range(dim):
                if ik == ir: continue
                vv *= v0s[ik]

            bfgg[:, ir, ir, :] = vv

            for ic in range(dim):
                if ic == ir: continue

                val = v1s[ir] * v1s[ic]
                for ik in range(dim):
                    if (ik == ir) or (ik == ic): continue
                    val *= v0s[ik]

                bfgg[:, ir, ic, :] += val

        return bfgg

    def get_mtx_i(self):
        """
        Return the barycentric coordinate matrix inverse of the auxiliary
        1D space.
        """
        return self.ps1d.mtx_i
class LobattoTensorProductPolySpace(PolySpace):
    """
    Hierarchical polynomial space using Lobatto functions.

    Each row of the `nodes` attribute defines indices of Lobatto functions that
    need to be multiplied together to evaluate the corresponding shape
    function. This defines the ordering of basis functions on the reference
    element.
    """
    name = 'lobatto_tensor_product'

    def __init__(self, name, geometry, order):
        PolySpace.__init__(self, name, geometry, order)

        aux = self._define_nodes()
        self.nodes, self.nts, node_coors, self.face_axes, self.sfnodes = aux
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        # Per-basis-function polynomial order: the product of the positive
        # Lobatto indices in each row of `nodes`.
        aux = nm.where(self.nodes > 0, self.nodes, 1)
        self.node_orders = nm.prod(aux, axis=1)

        # Indices of the edge (type 1) and face (type 2) basis functions.
        self.edge_indx = nm.where(self.nts[:, 0] == 1)[0]
        self.face_indx = nm.where(self.nts[:, 0] == 2)[0]

        self.face_axes_nodes = self._get_face_axes_nodes(self.face_axes)

    def _get_counts(self):
        """
        Return the total number of basis functions and the counts of
        functions per edge, per face and in the element interior (bubble).
        """
        order = self.order
        dim = self.geometry.dim

        n_nod = (order + 1) ** dim
        n_per_edge = (order - 1)
        n_per_face = (order - 1) ** (dim - 1)
        n_bubble = (order - 1) ** dim

        return n_nod, n_per_edge, n_per_face, n_bubble

    def _define_nodes(self):
        # NOTE(review): this copy of the method ends right after the final
        # assert without returning anything, while __init__() unpacks a
        # 5-tuple from its result - the code here looks truncated; verify
        # against the canonical source.
        geometry = self.geometry
        order = self.order

        n_v, dim = geometry.n_vertex, geometry.dim

        n_nod, n_per_edge, n_per_face, n_bubble = self._get_counts()

        nodes = nm.zeros((n_nod, dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        # Vertex nodes.
        nts[0:n_v, 0] = 0
        nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
        nodes[0:n_v] = nm.array(vertex_maps[dim], dtype=nm.int32)
        ii = n_v

        # Edge nodes.
        if (dim > 1) and (n_per_edge > 0):
            ik = nm.arange(2, order + 1, dtype=nm.int32)
            zo = nm.zeros((n_per_edge, 2), dtype=nm.int32)
            zo[:, 1] = 1
            for ie, edge in enumerate(geometry.edges):
                n1, n2 = nodes[edge]
                # The fixed axes keep the vertex value, the running axis
                # gets the higher Lobatto indices.
                ifix = nm.where(n1 == n2)[0]
                irun = nm.where(n1 != n2)[0][0]
                ic = n1[ifix]

                nodes[ii:ii + n_per_edge, ifix] = zo[:, ic]
                nodes[ii:ii + n_per_edge, irun] = ik
                nts[ii:ii + n_per_edge] = [[1, ie]]
                ii += n_per_edge

        # 3D face nodes.
        face_axes = []
        sfnodes = None
        if (dim == 3) and (n_per_face > 0):
            n_face = len(geometry.faces)
            sfnodes = nm.zeros((n_per_face * n_face, dim), nm.int32)
            ii0 = ii

            ik = nm.arange(2, order + 1, dtype=nm.int32)
            zo = nm.zeros((n_per_face, 2), dtype=nm.int32)
            zo[:, 1] = 1

            for ifa, face in enumerate(geometry.faces):
                ns = nodes[face]

                # Classify axes by how much the vertex coordinates vary
                # along the face: 0 = fixed, 2 and 1 = the running axes.
                diff = nm.diff(ns, axis=0)
                asum = nm.abs(diff).sum(axis=0)
                ifix = nm.where(asum == 0)[0][0]
                ic = ns[0, ifix]
                irun1 = nm.where(asum == 2)[0][0]
                irun2 = nm.where(asum == 1)[0][0]

                iy, ix = nm.meshgrid(ik, ik)

                nodes[ii:ii + n_per_face, ifix] = zo[:, ic]
                nodes[ii:ii + n_per_face, irun1] = ix.ravel()
                nodes[ii:ii + n_per_face, irun2] = iy.ravel()

                nts[ii:ii + n_per_face] = [[2, ifa]]

                # The same nodes with swapped running axes.
                ij = ii - ii0
                sfnodes[ij:ij + n_per_face, ifix] = zo[:, ic]
                sfnodes[ij:ij + n_per_face, irun1] = iy.ravel()
                sfnodes[ij:ij + n_per_face, irun2] = ix.ravel()

                face_axes.append([irun1, irun2])

                ii += n_per_face

            face_axes = nm.array(face_axes)

        # Bubble nodes.
        if n_bubble > 0:
            ik = nm.arange(2, order + 1, dtype=nm.int32)
            nodes[ii:] = nm.array([aux for aux in combine([ik] * dim)])
            nts[ii:ii + n_bubble] = [[3, 0]]
            ii += n_bubble

        assert_(ii == n_nod)
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import find_subclasses, assert_, Struct
from sfepy.linalg import combine, insert_strided_axis
from six.moves import range
from functools import reduce
# Requires fixed vertex numbering!
# Binary (0/1) reference coordinates of hypercube vertices, keyed by the
# space dimension. Used to build tensor product and Lobatto vertex nodes.
vertex_maps = {3 : [[0, 0, 0],
                    [1, 0, 0],
                    [1, 1, 0],
                    [0, 1, 0],
                    [0, 0, 1],
                    [1, 0, 1],
                    [1, 1, 1],
                    [0, 1, 1]],
               2 : [[0, 0],
                    [1, 0],
                    [1, 1],
                    [0, 1]],
               1 : [[0],
                    [1]]}
def transform_basis(transform, bf):
    """
    Transform a basis `bf` using the `transform` array of per-cell matrices.

    Parameters
    ----------
    transform : array
        The transformation matrices, one per cell.
    bf : array
        The basis values, with or without an extra leading axis.

    Returns
    -------
    nbf : array
        The transformed basis, with the cell axis prepended.
    """
    subscripts = 'cij,qdj->cqdi' if bf.ndim == 3 else 'cij,oqdj->cqdi'

    return nm.einsum(subscripts, transform, bf)
class LagrangeNodes(Struct):
    """Helper class for defining nodes of Lagrange elements.

    All methods fill `nodes` and `nts` in place starting at row `iseq` and
    return the next free row index.
    """

    @staticmethod
    def append_edges(nodes, nts, iseq, nt, edges, order):
        """
        Append the interior nodes of simplex edges.

        Parameters
        ----------
        nodes : array
            The array of integer node exponents, modified in place.
        nts : array
            The (node type, type index) array, modified in place.
        iseq : int
            The first row to fill.
        nt : int
            The node type to record in `nts`.
        edges : sequence
            The edges given as pairs of vertex indices into `nodes`.
        order : int
            The polynomial order.

        Returns
        -------
        iseq : int
            The next free row index.
        """
        delta = 1.0 / float(order)

        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(order - 1):
                c2 = ie + 1
                c1 = order - c2
                nts[iseq] = [nt, ii]
                # Integer convex combination of the edge end-point rows.
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq

    @staticmethod
    def append_faces(nodes, nts, iseq, nt, faces, order):
        """
        Append the interior nodes of simplex (triangular) faces, analogously
        to :func:`append_edges()`.
        """
        delta = 1.0 / float(order)

        for ii, face in enumerate(faces):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            for i1 in range(order - 2):
                for i2 in range(order - 2 - i1):
                    c3 = i1 + 1
                    c2 = i2 + 1
                    c1 = order - c3 - c2
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_bubbles(nodes, nts, iseq, nt, order):
        """
        Append the interior (bubble) nodes of a tetrahedron. The first four
        rows of `nodes` must hold the vertex exponents.
        """
        delta = 1.0 / float(order)

        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        for i1 in range(order - 3):
            for i2 in range(order - 3):
                for i3 in range(order - 3 - i1 - i2):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = i3 + 1
                    c1 = order - c4 - c3 - c2
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3 + c4 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_tp_edges(nodes, nts, iseq, nt, edges, ao):
        """
        Append the interior nodes of tensor product element edges; `ao` is
        the polynomial order.
        """
        delta = 1.0 / float(ao)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(ao - 1):
                c2 = ie + 1
                c1 = ao - c2
                nts[iseq] = [nt, ii]
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq

    @staticmethod
    def append_tp_faces(nodes, nts, iseq, nt, faces, ao):
        """
        Append the interior nodes of tensor product (quadrilateral) faces.
        The weights are products of two coefficients per corner.
        """
        delta = 1.0 / (float(ao) ** 2)
        for ii, face in enumerate( faces ):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            n4 = nodes[face[3],:].copy()
            for i1 in range(ao - 1):
                for i2 in range(ao - 1):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = ao - c4
                    c1 = ao - c3
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * n1 + c2 * c3 * n2
                                       + c3 * c4 * n3 + c4 * c1 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_tp_bubbles(nodes, nts, iseq, nt, ao):
        """
        Append the interior (bubble) nodes of a hexahedron. The first eight
        rows of `nodes` must hold the vertex exponents.
        """
        delta = 1.0 / (float(ao) ** 3)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        n5 = nodes[4,:].copy()
        n6 = nodes[5,:].copy()
        n7 = nodes[6,:].copy()
        n8 = nodes[7,:].copy()
        for i1 in range(ao - 1):
            for i2 in range(ao - 1):
                for i3 in range(ao - 1):
                    c6 = i1 + 1
                    c5 = i2 + 1
                    c4 = i3 + 1
                    c3 = ao - c6
                    c2 = ao - c5
                    c1 = ao - c4
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * c3 * n1 + c4 * c2 * c3 * n2
                                       + c5 * c4 * c3 * n3 + c1 * c3 * c5 * n4
                                       + c1 * c2 * c6 * n5 + c4 * c2 * c6 * n6
                                       + c5 * c4 * c6 * n7 + c1 * c6 * c5 * n8)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
class NodeDescription(Struct):
    """
    Describe FE nodes defined on different parts of a reference element.
    """

    def _describe_facets(self, ii):
        nts = self.node_types[ii]

        # A new facet starts wherever the facet index increases.
        starts = nm.where(nts[1:, 1] > nts[:-1, 1])[0]
        if not len(starts):
            return None, 0

        ii = ii.astype(nm.int32)

        bounds = nm.r_[0, starts + 1, nts.shape[0]]
        ifacets = [ii[bounds[ir] : bounds[ir + 1]]
                   for ir in range(len(bounds) - 1)]
        return ifacets, len(ifacets[0])

    def _describe_other(self, ii):
        if not len(ii):
            return None, 0

        return ii, len(ii)

    def _get_facet_nodes(self, ifacets, nodes):
        if ifacets is None:
            return None

        return [nodes[indx] for indx in ifacets]

    def _get_nodes(self, ii, nodes):
        return None if ii is None else nodes[ii]

    def __init__(self, node_types, nodes):
        self.node_types = node_types

        def of_type(kind):
            # Indices of nodes with the given type code.
            return nm.where(node_types[:, 0] == kind)[0]

        # Vertex nodes.
        self.vertex, self.n_vertex_nod = self._describe_other(of_type(0))
        self.vertex_nodes = self._get_nodes(self.vertex, nodes)

        # Edge nodes.
        self.edge, self.n_edge_nod = self._describe_facets(of_type(1))
        self.edge_nodes = self._get_facet_nodes(self.edge, nodes)

        # Face nodes.
        self.face, self.n_face_nod = self._describe_facets(of_type(2))
        self.face_nodes = self._get_facet_nodes(self.face, nodes)

        # Bubble nodes.
        self.bubble, self.n_bubble_nod = self._describe_other(of_type(3))
        self.bubble_nodes = self._get_nodes(self.bubble, nodes)

    def has_extra_nodes(self):
        """
        Return True if the element has some edge, face or bubble nodes.
        """
        n_extra = self.n_edge_nod + self.n_face_nod + self.n_bubble_nod

        return n_extra > 0
class PolySpace(Struct):
    """Abstract polynomial space class."""
    # Cache of all PolySpace subclasses, filled lazily by any_from_args().
    _all = None

    # Mapping of (dimension, number of vertices) to the geometry kind.
    keys = {
        (1, 2) : 'simplex',
        (2, 3) : 'simplex',
        (3, 4) : 'simplex',
        (2, 4) : 'tensor_product',
        (3, 8) : 'tensor_product',
    }

    @staticmethod
    def any_from_args(name, geometry, order, base='lagrange',
                      force_bubble=False):
        """
        Construct a particular polynomial space classes according to the
        arguments passed in.
        """
        if name is None:
            name = PolySpace.suggest_name(geometry, order, base, force_bubble)

        # Collect the subclasses once, then look up by key.
        if PolySpace._all is None:
            PolySpace._all = find_subclasses(globals(), [PolySpace])
        table = PolySpace._all

        key = '%s_%s' % (base, PolySpace.keys[(geometry.dim,
                                               geometry.n_vertex)])
        # A 1D line element falls back to the tensor product space when no
        # dedicated class exists for the requested base.
        if (geometry.name == '1_2') and (key not in table):
            key = '%s_%s' % (base, 'tensor_product')

        if force_bubble:
            key += '_bubble'

        return table[key](name, geometry, order)

    @staticmethod
    def suggest_name(geometry, order, base='lagrange',
                     force_bubble=False):
        """
        Suggest the polynomial space name given its constructor parameters.
        """
        aux = geometry.get_interpolation_name()[:-1]
        if force_bubble:
            return aux + ('%dB' % order)
        else:
            return aux + ('%d' % order)

    def __init__(self, name, geometry, order):
        self.name = name
        self.geometry = geometry
        self.order = order

        # Bounding box of the reference element: row 0 mins, row 1 maxs.
        self.bbox = nm.vstack((geometry.coors.min(0), geometry.coors.max(0)))

    def eval_base(self, coors, diff=0, ori=None, force_axis=False,
                  transform=None, suppress_errors=False, eps=1e-15):
        """
        Evaluate the basis or its first or second derivatives in points given
        by coordinates. The real work is done in _eval_base() implemented in
        subclasses.

        Note that the second derivative code is a work-in-progress and only
        `coors` and `transform` arguments are used.

        Parameters
        ----------
        coors : array_like
            The coordinates of points where the basis is evaluated. See Notes.
        diff : 0, 1 or 2
            If nonzero, return the given derivative.
        ori : array_like, optional
            Optional orientation of element facets for per element basis.
        force_axis : bool
            If True, force the resulting array shape to have one more axis even
            when `ori` is None.
        transform : array_like, optional
            The basis transform array.
        suppress_errors : bool
            If True, do not report points outside the reference domain.
        eps : float
            Accuracy for comparing coordinates.

        Returns
        -------
        base : array
            The basis (shape (n_coor, 1, n_base)) or its first derivative
            (shape (n_coor, dim, n_base)) or its second derivative (shape
            (n_coor, dim, dim, n_base)) evaluated in the given points. An
            additional axis is pre-pended of length n_cell, if `ori` is given,
            or of length 1, if `force_axis` is True.

        Notes
        -----
        If coors.ndim == 3, several point sets are assumed, with equal number
        of points in each of them. This is the case, for example, of the
        values of the volume base functions on the element facets. The indexing
        (of bf_b(g)) is then (ifa,iqp,:,n_ep), so that the facet can be set in
        C using FMF_SetCell.
        """
        coors = nm.asarray(coors)
        if not coors.ndim in (2, 3):
            raise ValueError('coordinates must have 2 or 3 dimensions! (%d)'
                             % coors.ndim)

        if (coors.ndim == 2):
            base = self._eval_base(coors, diff=diff, ori=ori,
                                   suppress_errors=suppress_errors,
                                   eps=eps)

            if (base.ndim == 3) and force_axis:
                base = base[None, ...]

            if not base.flags['C_CONTIGUOUS']:
                base = nm.ascontiguousarray(base)

        else: # Several point sets.
            if diff:
                bdim = self.geometry.dim
            else:
                bdim = 1

            base = nm.empty((coors.shape[0], coors.shape[1],
                             bdim, self.n_nod), dtype=nm.float64)

            for ii, _coors in enumerate(coors):
                base[ii] = self._eval_base(_coors, diff=diff, ori=ori,
                                           suppress_errors=suppress_errors,
                                           eps=eps)

        if transform is not None:
            base = transform_basis(transform, base)

        return base

    def get_mtx_i(self):
        """
        Return the barycentric coordinate matrix inverse.
        """
        return self.mtx_i

    def describe_nodes(self):
        """
        Return a :class:`NodeDescription` of the space nodes.
        """
        return NodeDescription(self.nts, self.nodes)
class LagrangePolySpace(PolySpace):
    """
    Base class of Lagrange polynomial spaces evaluated through the C
    extension module context.
    """

    def create_context(self, cmesh, eps, check_errors, i_max, newton_eps,
                       tdim=None):
        """
        Create the C evaluation context for this space.

        Either `cmesh` or `tdim` has to be given.
        """
        from sfepy.discrete.fem.extmods.bases import CLagrangeContext

        ref_coors = self.geometry.coors

        if cmesh is None:
            mesh_coors = mesh_conn = None

            if tdim is None:
                raise ValueError('supply either cmesh or tdim!')

        else:
            mesh_coors = cmesh.coors

            conn = cmesh.get_conn(cmesh.tdim, 0)
            mesh_conn = conn.indices.reshape(cmesh.n_el, -1).astype(nm.int32)

            if tdim is None:
                tdim = cmesh.tdim

        ctx = CLagrangeContext(order=self.order,
                               tdim=tdim,
                               nodes=self.nodes,
                               ref_coors=ref_coors,
                               mesh_coors=mesh_coors,
                               mesh_conn=mesh_conn,
                               mtx_i=self.get_mtx_i(),
                               eps=eps,
                               check_errors=check_errors,
                               i_max=i_max,
                               newton_eps=newton_eps)

        return ctx

    def _eval_base(self, coors, diff=0, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See :func:`PolySpace.eval_base()`.
        """
        if diff == 2:
            return self._eval_hessian(coors)

        return self.eval_ctx.evaluate(coors, diff=diff, eps=eps,
                                      check_errors=not suppress_errors)
class LagrangeSimplexPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a simplex domain."""
    name = 'lagrange_simplex'

    def __init__(self, name, geometry, order, init_context=True):
        """
        Parameters
        ----------
        name : str
            The name of the space.
        geometry : Struct
            The reference element geometry.
        order : int
            The polynomial order.
        init_context : bool
            If True, create the C evaluation context.
        """
        PolySpace.__init__(self, name, geometry, order)

        n_v = geometry.n_vertex

        # Its inverse mtx_i maps augmented point coordinates [x, 1] to
        # barycentric coordinates, cf. get_bc() in _eval_hessian().
        mtx = nm.ones((n_v, n_v), nm.float64)
        mtx[0:n_v-1,:] = nm.transpose(geometry.coors)
        self.mtx_i = nm.ascontiguousarray(nla.inv(mtx))
        self.rhs = nm.ones((n_v,), nm.float64)

        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)

        else:
            self.eval_ctx = None

    def _define_nodes(self):
        """
        Define the simplex Lagrange nodes.

        Returns
        -------
        nodes : array
            The integer barycentric exponents of each node (each row sums
            to `order`).
        nts : array
            The node types (type, type index), type being 0 (vertex),
            1 (edge), 2 (face) or 3 (bubble).
        node_coors : array
            The node coordinates.
        """
        # Factorial.
        fac = lambda n : reduce(lambda a, b : a * (b + 1), range(n), 1)

        geometry = self.geometry
        n_v, dim = geometry.n_vertex, geometry.dim
        order = self.order

        # Simplex node count: C(order + dim, dim).
        n_nod = fac(order + dim) // (fac(order) * fac(dim))
        nodes = nm.zeros((n_nod, n_v), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        if order == 0:
            # A single bubble node for the constant basis.
            nts[0,:] = [3, 0]
            nodes[0,:] = nm.zeros((n_v,), nm.int32)

        else:
            iseq = 0

            # Vertex nodes.
            nts[0:n_v,0] = 0
            nts[0:n_v,1] = nm.arange(n_v, dtype = nm.int32)
            aux = order * nm.identity(n_v, dtype = nm.int32)
            nodes[iseq:iseq+n_v,:] = aux
            iseq += n_v

            # Edge, face and bubble nodes, depending on the dimension.
            if dim == 1:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 3,
                                                  [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
                                                  geometry.edges, order)
                iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 3,
                                                  [[0, 1, 2]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
                                                  geometry.edges, order)
                iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 2,
                                                  geometry.faces, order)
                iseq = LagrangeNodes.append_bubbles(nodes, nts, iseq, 3,
                                                    order)
            else:
                raise NotImplementedError

        # Check orders: every row of exponents must sum to `order`.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order, orders))

        # Coordinates of the nodes.
        if order == 0:
            # The single node is placed at the vertex average (barycentre).
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v

        else:
            node_coors = nm.dot(nodes, geometry.coors) / order

        return nodes, nts, node_coors

    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        def get_bc(coor):
            # Barycentric coordinates of a point.
            rhs = nm.concatenate((coor, [1]))
            bc = nm.dot(self.mtx_i, rhs)

            return bc

        def get_val(bc, node, omit=[]):
            # Product of the 1D Lagrange factors, skipping the barycentric
            # coordinates listed in `omit`.
            val = nm.ones(1, nm.float64)
            for i1 in range(bc.shape[0]):
                if i1 in omit: continue

                for i2 in range(node[i1]):
                    val *= (self.order * bc[i1] - i2) / (i2 + 1.0)

            return val

        def get_der(bc1, node1, omit=[]):
            # Derivative of one factor w.r.t. its barycentric coordinate,
            # by the product rule (terms in `omit` skipped).
            val = nm.zeros(1, nm.float64)
            for i1 in range(node1):
                if i1 in omit: continue

                aux = nm.ones(1, nm.float64)
                for i2 in range(node1):
                    if (i1 == i2) or (i2 in omit): continue

                    aux *= (self.order * bc1 - i2) / (i2 + 1.0)

                val += aux * self.order / (i1 + 1.0)

            return val

        n_v = self.mtx_i.shape[0]
        dim = n_v - 1

        # Gradient part of mtx_i: chain-rule factors d bc / d x.
        mi = self.mtx_i[:, :dim]
        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)

        for ic, coor in enumerate(coors):
            bc = get_bc(coor)

            for ii, node in enumerate(self.nodes):
                for ig1, bc1 in enumerate(bc): # 1. derivative w.r.t. bc1.
                    for ig2, bc2 in enumerate(bc): # 2. derivative w.r.t. bc2.
                        if ig1 == ig2:
                            # Diagonal term: second derivative of a single
                            # factor.
                            val = get_val(bc, node, omit=[ig1])

                            vv = 0.0
                            for i1 in range(node[ig1]):
                                aux = get_der(bc2, node[ig2], omit=[i1])
                                vv += aux * self.order / (i1 + 1.0)
                            val *= vv

                        else:
                            # Mixed term: first derivatives of two factors.
                            val = get_val(bc, node, omit=[ig1, ig2])
                            val *= get_der(bc1, node[ig1])
                            val *= get_der(bc2, node[ig2])

                        # Transform barycentric derivatives to Cartesian
                        # ones via the outer product of mtx_i rows.
                        bfgg[ic, :, :, ii] += val * mi[ig1] * mi[ig2][:, None]

        return bfgg
class LagrangeSimplexBPolySpace(LagrangeSimplexPolySpace):
    """Lagrange polynomial space with forced bubble function on a simplex
    domain."""
    name = 'lagrange_simplex_bubble'

    def __init__(self, name, geometry, order, init_context=True):
        LagrangeSimplexPolySpace.__init__(self, name, geometry, order,
                                          init_context=False)

        nodes, nts, node_coors = self.nodes, self.nts, self.node_coors

        # Append one node type row for the extra bubble node.
        nts = nm.resize(nts, [nts.shape[0] + 1, 2])
        nts[-1, :] = [3, 0]

        nodes = nm.resize(nodes, [nodes.shape[0] + 1, nodes.shape[1]])
        # Make a 'hypercubic' (cubic in 2D) node.
        nodes[-1, :] = 1

        # Place the bubble node at the vertex average (barycentre).
        n_v = self.geometry.n_vertex
        ones = nm.ones((n_v,), nm.int32)
        centre = nm.dot(ones, self.geometry.coors) / n_v
        node_coors = nm.vstack((node_coors, centre))

        self.nodes, self.nts = nodes, nts
        self.node_coors = nm.ascontiguousarray(node_coors)

        self.bnode = nodes[-1:, :]

        self.n_nod = self.nodes.shape[0]

        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)

        else:
            self.eval_ctx = None

    def create_context(self, *args, **kwargs):
        """
        Create the C evaluation context with the bubble flag set.
        """
        ctx = LagrangePolySpace.create_context(self, *args, **kwargs)
        ctx.is_bubble = 1

        return ctx
class LagrangeTensorProductPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a tensor product (hypercube) domain."""
    name = 'lagrange_tensor_product'

    def __init__(self, name, geometry, order, init_context=True):
        """
        Parameters
        ----------
        name : str
            The name of the space.
        geometry : Struct
            The reference element geometry.
        order : int
            The polynomial order.
        init_context : bool
            If True, create the C evaluation context.
        """
        PolySpace.__init__(self, name, geometry, order)

        # Auxiliary 1D space - each tensor product shape function is a
        # product of 1D Lagrange shape functions, one per space dimension.
        g1d = Struct(n_vertex=2,
                     dim=1,
                     coors=self.bbox[:, 0:1].copy())
        self.ps1d = LagrangeSimplexPolySpace('P_aux', g1d, order,
                                             init_context=False)

        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        if init_context:
            # NOTE(review): int(sqrt(n_vertex)) yields 2 also for 3D
            # hexahedra (sqrt(8) ~ 2.83) - confirm this is the intended
            # topological dimension of the C context.
            tdim = int(nm.sqrt(geometry.n_vertex))
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=tdim)

        else:
            self.eval_ctx = None

    def _define_nodes(self):
        """
        Define the tensor product Lagrange nodes.

        Returns
        -------
        nodes : array
            For each node, two integer exponents per space dimension (each
            pair sums to `order`).
        nts : array
            The node types (type, type index), type being 0 (vertex),
            1 (edge), 2 (face) or 3 (bubble).
        node_coors : array
            The node coordinates.
        """
        geometry = self.geometry
        order = self.order

        n_v, dim = geometry.n_vertex, geometry.dim

        vertex_map = order * nm.array(vertex_maps[dim], dtype=nm.int32)

        n_nod = (order + 1) ** dim
        nodes = nm.zeros((n_nod, 2 * dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        if order == 0:
            # A single bubble node for the constant basis.
            nts[0, :] = [3, 0]
            nodes[0, :] = 0

        else:
            iseq = 0

            # Vertex nodes. (An unused identity-matrix expression that
            # discarded its result was removed from this branch.)
            nts[0:n_v, 0] = 0
            nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
            if dim == 3:
                for ii in range(n_v):
                    i1, i2, i3 = vertex_map[ii]
                    nodes[iseq, :] = [order - i1, i1,
                                      order - i2, i2,
                                      order - i3, i3]
                    iseq += 1

            elif dim == 2:
                for ii in range(n_v):
                    i1, i2 = vertex_map[ii]
                    nodes[iseq, :] = [order - i1, i1, order - i2, i2]
                    iseq += 1

            else:
                for ii in range(n_v):
                    i1 = vertex_map[ii][0]
                    nodes[iseq, :] = [order - i1, i1]
                    iseq += 1

            # Edge, face and bubble nodes, depending on the dimension.
            if dim == 1:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 3,
                                                     [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 3,
                                                     [[0, 1, 2, 3]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 2,
                                                     geometry.faces, order)
                iseq = LagrangeNodes.append_tp_bubbles(nodes, nts, iseq, 3,
                                                       order)
            else:
                raise NotImplementedError

        # Check orders: every row of exponents must sum to dim * order.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order * dim):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order * dim, orders))

        # Coordinates of the nodes.
        if order == 0:
            # The single node is placed at the vertex average (barycentre).
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v

        else:
            # Blend the 1D bounding box ends using the exponent pairs.
            c_min, c_max = self.bbox[:, 0]

            cr = nm.arange(2 * dim)
            node_coors = (nodes[:, cr[::2]] * c_min
                          + nodes[:, cr[1::2]] * c_max) / order

        return nodes, nts, node_coors

    def _eval_base_debug(self, coors, diff=False, ori=None,
                         suppress_errors=False, eps=1e-15):
        """Python version of eval_base()."""
        dim = self.geometry.dim

        ev = self.ps1d.eval_base

        if diff:
            base = nm.ones((coors.shape[0], dim, self.n_nod), dtype=nm.float64)

            for ii in range(dim):
                # Evaluate the ii-th 1D factor of all shape functions.
                self.ps1d.nodes = self.nodes[:, 2 * ii : 2 * ii + 2].copy()
                self.ps1d.n_nod = self.n_nod

                for iv in range(dim):
                    # Differentiate only the factor that matches the
                    # gradient component.
                    if ii == iv:
                        base[:, iv:iv+1, :] *= ev(coors[:, ii:ii+1].copy(),
                                                  diff=True,
                                                  suppress_errors=suppress_errors,
                                                  eps=eps)

                    else:
                        base[:, iv:iv+1, :] *= ev(coors[:, ii:ii+1].copy(),
                                                  diff=False,
                                                  suppress_errors=suppress_errors,
                                                  eps=eps)

        else:
            base = nm.ones((coors.shape[0], 1, self.n_nod), dtype=nm.float64)

            for ii in range(dim):
                self.ps1d.nodes = self.nodes[:, 2 * ii : 2 * ii + 2].copy()
                self.ps1d.n_nod = self.n_nod

                base *= ev(coors[:, ii:ii+1].copy(),
                           diff=diff,
                           suppress_errors=suppress_errors,
                           eps=eps)

        return base

    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        evh = self.ps1d.eval_base

        dim = self.geometry.dim

        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)

        # Values, first and second derivatives of the 1D factors.
        v0s = []
        v1s = []
        v2s = []
        for ii in range(dim):
            self.ps1d.nodes = self.nodes[:, 2 * ii : 2 * ii + 2].copy()
            self.ps1d.n_nod = self.n_nod
            ev = self.ps1d.create_context(None, 0, 1e-15, 100, 1e-8,
                                          tdim=1).evaluate

            v0s.append(ev(coors[:, ii:ii+1].copy())[:, 0, :])
            v1s.append(ev(coors[:, ii:ii+1].copy(), diff=1)[:, 0, :])
            v2s.append(evh(coors[:, ii:ii+1], diff=2)[:, 0, 0, :])

        for ir in range(dim):
            vv = v2s[ir] # Destroys v2s!
            for ik in range(dim):
                if ik == ir: continue
                vv *= v0s[ik]

            bfgg[:, ir, ir, :] = vv

            for ic in range(dim):
                if ic == ir: continue

                val = v1s[ir] * v1s[ic]
                for ik in range(dim):
                    if (ik == ir) or (ik == ic): continue
                    val *= v0s[ik]

                bfgg[:, ir, ic, :] += val

        return bfgg

    def get_mtx_i(self):
        """
        Return the barycentric coordinate matrix inverse of the auxiliary
        1D space.
        """
        return self.ps1d.mtx_i
class LobattoTensorProductPolySpace(PolySpace):
    """
    Hierarchical polynomial space using Lobatto functions.

    Each row of the `nodes` attribute defines indices of Lobatto functions that
    need to be multiplied together to evaluate the corresponding shape
    function. This defines the ordering of basis functions on the reference
    element.
    """
    name = 'lobatto_tensor_product'

    def __init__(self, name, geometry, order):
        PolySpace.__init__(self, name, geometry, order)

        aux = self._define_nodes()
        self.nodes, self.nts, node_coors, self.face_axes, self.sfnodes = aux
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        # Per-basis-function polynomial order: the product of the positive
        # Lobatto indices in each row of `nodes`.
        aux = nm.where(self.nodes > 0, self.nodes, 1)
        self.node_orders = nm.prod(aux, axis=1)

        # Indices of the edge (type 1) and face (type 2) basis functions.
        self.edge_indx = nm.where(self.nts[:, 0] == 1)[0]
        self.face_indx = nm.where(self.nts[:, 0] == 2)[0]

        self.face_axes_nodes = self._get_face_axes_nodes(self.face_axes)
def _get_counts(self):
order = self.order
dim = self.geometry.dim
n_nod = (order + 1) ** dim
n_per_edge = (order - 1)
n_per_face = (order - 1) ** (dim - 1)
n_bubble = (order - 1) ** dim
return n_nod, n_per_edge, n_per_face, n_bubble
def _define_nodes(self):
geometry = self.geometry
order = self.order
n_v, dim = geometry.n_vertex, geometry.dim
n_nod, n_per_edge, n_per_face, n_bubble = self._get_counts()
nodes = nm.zeros((n_nod, dim), nm.int32)
nts = nm.zeros((n_nod, 2), nm.int32)
# Vertex nodes.
nts[0:n_v, 0] = 0
nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
nodes[0:n_v] = nm.array(vertex_maps[dim], dtype=nm.int32)
ii = n_v
# Edge nodes.
if (dim > 1) and (n_per_edge > 0):
ik = nm.arange(2, order + 1, dtype=nm.int32)
zo = nm.zeros((n_per_edge, 2), dtype=nm.int32)
zo[:, 1] = 1
for ie, edge in enumerate(geometry.edges):
n1, n2 = nodes[edge]
ifix = nm.where(n1 == n2)[0]
irun = nm.where(n1 != n2)[0][0]
ic = n1[ifix]
nodes[ii:ii + n_per_edge, ifix] = zo[:, ic]
nodes[ii:ii + n_per_edge, irun] = ik
nts[ii:ii + n_per_edge] = [[1, ie]]
ii += n_per_edge
# 3D face nodes.
face_axes = []
sfnodes = None
if (dim == 3) and (n_per_face > 0):
n_face = len(geometry.faces)
sfnodes = nm.zeros((n_per_face * n_face, dim), nm.int32)
ii0 = ii
ik = nm.arange(2, order + 1, dtype=nm.int32)
zo = nm.zeros((n_per_face, 2), dtype=nm.int32)
zo[:, 1] = 1
for ifa, face in enumerate(geometry.faces):
ns = nodes[face]
diff = nm.diff(ns, axis=0)
asum = nm.abs(diff).sum(axis=0)
ifix = nm.where(asum == 0)[0][0]
ic = ns[0, ifix]
irun1 = nm.where(asum == 2)[0][0]
irun2 = nm.where(asum == 1)[0][0]
iy, ix = nm.meshgrid(ik, ik)
nodes[ii:ii + n_per_face, ifix] = zo[:, ic]
nodes[ii:ii + n_per_face, irun1] = ix.ravel()
nodes[ii:ii + n_per_face, irun2] = iy.ravel()
nts[ii:ii + n_per_face] = [[2, ifa]]
ij = ii - ii0
sfnodes[ij:ij + n_per_face, ifix] = zo[:, ic]
sfnodes[ij:ij + n_per_face, irun1] = iy.ravel()
sfnodes[ij:ij + n_per_face, irun2] = ix.ravel()
face_axes.append([irun1, irun2])
ii += n_per_face
face_axes = nm.array(face_axes)
# Bubble nodes.
if n_bubble > 0:
ik = nm.arange(2, order + 1, dtype=nm.int32)
nodes[ii:] = nm.array([aux for aux in combine([ik] * dim)])
nts[ii:ii + n_bubble] = [[3, 0]]
ii += n_bubble
assert_(ii == n_nod)
# Coordinates of the "nodes". All nodes on a facet have the same
# coordinates - the centre of the facet.
c_min, c_max = self.bbox[:, 0]
node_coors = nm.zeros(nodes.shape, dtype=nm.float64)
node_coors[:n_v] = nodes[:n_v]
if (dim > 1) and (n_per_edge > 0):
ie = nm.where(nts[:, 0] == 1)[0]
node_coors[ie] = node_coors[geometry.edges[nts[ie, 1]]].mean(1)
if (dim == 3) and (n_per_face > 0):
ifa = nm.where(nts[:, 0] == 2)[0]
node_coors[ifa] = node_coors[geometry.faces[nts[ifa, 1]]].mean(1)
if n_bubble > 0:
ib = nm.where(nts[:, 0] == 3)[0]
node_coors[ib] = node_coors[geometry.conn].mean(0)
return nodes, nts, node_coors, face_axes, sfnodes
    def _get_face_axes_nodes(self, face_axes):
        """
        Return the 1D function indices of the face functions along the two
        in-face axes of the first face, tiled for all faces; None if there
        are no faces.
        """
        if not len(face_axes): return None
        nodes = self.nodes[self.face_indx]
        n_per_face = self._get_counts()[2]
        # NOTE(review): 6 is presumably the number of hexahedron faces --
        # confirm this class is 3D-tensor-product only when faces exist.
        anodes = nm.tile(nodes[:n_per_face, face_axes[0]], (6, 1))
        return anodes
def _eval_base(self, coors, diff=False, ori=None,
suppress_errors=False, eps=1e-15):
"""
See PolySpace.eval_base().
"""
from .extmods.lobatto_bases import eval_lobatto_tensor_product as ev
c_min, c_max = self.bbox[:, 0]
base = ev(coors, self.nodes, c_min, c_max, self.order, diff)
if ori is not None:
ebase = nm.tile(base, (ori.shape[0], 1, 1, 1))
if self.edge_indx.shape[0]:
# Orient edge functions.
ie, ii = nm.where(ori[:, self.edge_indx] == 1)
ii = self.edge_indx[ii]
ebase[ie, :, :, ii] *= -1.0
if self.face_indx.shape[0]:
# Orient face functions.
fori = ori[:, self.face_indx]
# ... normal axis order
ie, ii = nm.where((fori == 1) | (fori == 2))
ii = self.face_indx[ii]
ebase[ie, :, :, ii] *= -1.0
# ... swapped axis order
sbase = ev(coors, self.sfnodes, c_min, c_max, self.order, diff)
sbase = | insert_strided_axis(sbase, 0, ori.shape[0]) | sfepy.linalg.insert_strided_axis |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import find_subclasses, assert_, Struct
from sfepy.linalg import combine, insert_strided_axis
from six.moves import range
from functools import reduce
# Requires fixed vertex numbering!
# 0/1 coordinates of the reference element vertices, keyed by the space
# dimension: row i gives the coordinates of vertex i.
vertex_maps = {3 : [[0, 0, 0],
                    [1, 0, 0],
                    [1, 1, 0],
                    [0, 1, 0],
                    [0, 0, 1],
                    [1, 0, 1],
                    [1, 1, 1],
                    [0, 1, 1]],
               2 : [[0, 0],
                    [1, 0],
                    [1, 1],
                    [0, 1]],
               1 : [[0],
                    [1]]}
def transform_basis(transform, bf):
    """
    Transform a basis `bf` using `transform` array of matrices.

    The last axis of `bf` is contracted with the last axis of the per-cell
    matrices in `transform` (shape (n_cell, ., .)); a leading cell axis is
    prepended to the result.
    """
    subscripts = 'cij,qdj->cqdi' if bf.ndim == 3 else 'cij,oqdj->cqdi'
    return nm.einsum(subscripts, transform, bf)
class LagrangeNodes(Struct):
    """Helper class for defining nodes of Lagrange elements."""
    @staticmethod
    def append_edges(nodes, nts, iseq, nt, edges, order):
        """
        Append nodes interior to the given edges as integer combinations of
        the edge end node rows, marking each with node type `nt` and the
        edge index. Return the updated sequence index `iseq`.
        """
        delta = 1.0 / float(order)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(order - 1):
                # Weights c1 + c2 == order.
                c2 = ie + 1
                c1 = order - c2
                nts[iseq] = [nt, ii]
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq
    @staticmethod
    def append_faces(nodes, nts, iseq, nt, faces, order):
        """
        Append nodes interior to the given three-vertex faces as integer
        combinations of the face corner node rows. Return the updated
        `iseq`.
        """
        delta = 1.0 / float(order)
        for ii, face in enumerate(faces):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            for i1 in range(order - 2):
                for i2 in range(order - 2 - i1):
                    # Weights c1 + c2 + c3 == order.
                    c3 = i1 + 1
                    c2 = i2 + 1
                    c1 = order - c3 - c2
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
    @staticmethod
    def append_bubbles(nodes, nts, iseq, nt, order):
        """
        Append interior (bubble) nodes of a cell with four corner node rows
        (rows 0-3 of `nodes`). Return the updated `iseq`.
        """
        delta = 1.0 / float(order)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        for i1 in range(order - 3):
            for i2 in range(order - 3):
                for i3 in range(order - 3 - i1 - i2):
                    # Weights c1 + c2 + c3 + c4 == order.
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = i3 + 1
                    c1 = order - c4 - c3 - c2
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3 + c4 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
    @staticmethod
    def append_tp_edges(nodes, nts, iseq, nt, edges, ao):
        """
        Tensor-product variant of :func:`append_edges()`, with per-axis
        order `ao`. Return the updated `iseq`.
        """
        delta = 1.0 / float(ao)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(ao - 1):
                c2 = ie + 1
                c1 = ao - c2
                nts[iseq] = [nt, ii]
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq
    @staticmethod
    def append_tp_faces(nodes, nts, iseq, nt, faces, ao):
        """
        Append nodes interior to the given four-vertex (tensor-product)
        faces, combining the corner node rows with bilinear weight
        products. Return the updated `iseq`.
        """
        delta = 1.0 / (float(ao) ** 2)
        for ii, face in enumerate( faces ):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            n4 = nodes[face[3],:].copy()
            for i1 in range(ao - 1):
                for i2 in range(ao - 1):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = ao - c4
                    c1 = ao - c3
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * n1 + c2 * c3 * n2
                                       + c3 * c4 * n3 + c4 * c1 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
    @staticmethod
    def append_tp_bubbles(nodes, nts, iseq, nt, ao):
        """
        Append interior (bubble) nodes of a cell with eight corner node
        rows (rows 0-7 of `nodes`), combining them with trilinear weight
        products. Return the updated `iseq`.
        """
        delta = 1.0 / (float(ao) ** 3)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        n5 = nodes[4,:].copy()
        n6 = nodes[5,:].copy()
        n7 = nodes[6,:].copy()
        n8 = nodes[7,:].copy()
        for i1 in range(ao - 1):
            for i2 in range(ao - 1):
                for i3 in range(ao - 1):
                    c6 = i1 + 1
                    c5 = i2 + 1
                    c4 = i3 + 1
                    c3 = ao - c6
                    c2 = ao - c5
                    c1 = ao - c4
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * c3 * n1 + c4 * c2 * c3 * n2
                                       + c5 * c4 * c3 * n3 + c1 * c3 * c5 * n4
                                       + c1 * c2 * c6 * n5 + c4 * c2 * c6 * n6
                                       + c5 * c4 * c6 * n7 + c1 * c6 * c5 * n8)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
class NodeDescription(Struct):
    """
    Describe FE nodes defined on different parts of a reference element.
    """
    def _describe_facets(self, ii):
        # Group the node indices `ii` per facet: a new facet starts
        # wherever the facet number in the node types increases.
        nts = self.node_types[ii]
        starts = nm.where(nts[1:, 1] > nts[:-1, 1])[0]
        if not len(starts):
            return None, 0
        ii = ii.astype(nm.int32)
        bounds = nm.r_[0, starts + 1, nts.shape[0]]
        ifacets = [ii[ib:ie] for ib, ie in zip(bounds[:-1], bounds[1:])]
        # All facets carry the same number of DOFs.
        return ifacets, len(ifacets[0])
    def _describe_other(self, ii):
        # Nodes that are not grouped per facet: all of them, or none.
        return (ii, len(ii)) if len(ii) else (None, 0)
    def _get_facet_nodes(self, ifacets, nodes):
        if ifacets is None:
            return None
        return [nodes[ig] for ig in ifacets]
    def _get_nodes(self, ii, nodes):
        return None if ii is None else nodes[ii]
    def __init__(self, node_types, nodes):
        self.node_types = node_types
        # Vertex nodes (type 0).
        iv = nm.where(node_types[:, 0] == 0)[0]
        self.vertex, self.n_vertex_nod = self._describe_other(iv)
        self.vertex_nodes = self._get_nodes(self.vertex, nodes)
        # Edge nodes (type 1), grouped per edge.
        ied = nm.where(node_types[:, 0] == 1)[0]
        self.edge, self.n_edge_nod = self._describe_facets(ied)
        self.edge_nodes = self._get_facet_nodes(self.edge, nodes)
        # Face nodes (type 2), grouped per face.
        ifc = nm.where(node_types[:, 0] == 2)[0]
        self.face, self.n_face_nod = self._describe_facets(ifc)
        self.face_nodes = self._get_facet_nodes(self.face, nodes)
        # Bubble nodes (type 3).
        ib = nm.where(node_types[:, 0] == 3)[0]
        self.bubble, self.n_bubble_nod = self._describe_other(ib)
        self.bubble_nodes = self._get_nodes(self.bubble, nodes)
    def has_extra_nodes(self):
        """
        Return True if the element has some edge, face or bubble nodes.
        """
        return (self.n_edge_nod + self.n_face_nod + self.n_bubble_nod) > 0
class PolySpace(Struct):
    """Abstract polynomial space class."""
    # Cache of all PolySpace subclasses, filled lazily by any_from_args().
    _all = None
    # Map of (dimension, number of vertices) to the reference cell family.
    keys = {
        (1, 2) : 'simplex',
        (2, 3) : 'simplex',
        (3, 4) : 'simplex',
        (2, 4) : 'tensor_product',
        (3, 8) : 'tensor_product',
    }
    @staticmethod
    def any_from_args(name, geometry, order, base='lagrange',
                      force_bubble=False):
        """
        Construct a particular polynomial space classes according to the
        arguments passed in.
        """
        if name is None:
            name = PolySpace.suggest_name(geometry, order, base, force_bubble)
        if PolySpace._all is None:
            PolySpace._all = find_subclasses(globals(), [PolySpace])
        table = PolySpace._all
        # Subclass lookup key, e.g. 'lagrange_simplex'.
        key = '%s_%s' % (base, PolySpace.keys[(geometry.dim,
                                              geometry.n_vertex)])
        # 1D segments fall back to the tensor product implementation when no
        # dedicated class exists.
        if (geometry.name == '1_2') and (key not in table):
            key = '%s_%s' % (base, 'tensor_product')
        if force_bubble:
            key += '_bubble'
        return table[key](name, geometry, order)
    @staticmethod
    def suggest_name(geometry, order, base='lagrange',
                     force_bubble=False):
        """
        Suggest the polynomial space name given its constructor parameters.
        """
        aux = geometry.get_interpolation_name()[:-1]
        if force_bubble:
            return aux + ('%dB' % order)
        else:
            return aux + ('%d' % order)
    def __init__(self, name, geometry, order):
        self.name = name
        self.geometry = geometry
        self.order = order
        # Per-axis [min, max] of the reference element coordinates.
        self.bbox = nm.vstack((geometry.coors.min(0), geometry.coors.max(0)))
    def eval_base(self, coors, diff=0, ori=None, force_axis=False,
                  transform=None, suppress_errors=False, eps=1e-15):
        """
        Evaluate the basis or its first or second derivatives in points given
        by coordinates. The real work is done in _eval_base() implemented in
        subclasses.
        Note that the second derivative code is a work-in-progress and only
        `coors` and `transform` arguments are used.
        Parameters
        ----------
        coors : array_like
            The coordinates of points where the basis is evaluated. See Notes.
        diff : 0, 1 or 2
            If nonzero, return the given derivative.
        ori : array_like, optional
            Optional orientation of element facets for per element basis.
        force_axis : bool
            If True, force the resulting array shape to have one more axis even
            when `ori` is None.
        transform : array_like, optional
            The basis transform array.
        suppress_errors : bool
            If True, do not report points outside the reference domain.
        eps : float
            Accuracy for comparing coordinates.
        Returns
        -------
        base : array
            The basis (shape (n_coor, 1, n_base)) or its first derivative
            (shape (n_coor, dim, n_base)) or its second derivative (shape
            (n_coor, dim, dim, n_base)) evaluated in the given points. An
            additional axis is pre-pended of length n_cell, if `ori` is given,
            or of length 1, if `force_axis` is True.
        Notes
        -----
        If coors.ndim == 3, several point sets are assumed, with equal number
        of points in each of them. This is the case, for example, of the
        values of the volume base functions on the element facets. The indexing
        (of bf_b(g)) is then (ifa,iqp,:,n_ep), so that the facet can be set in
        C using FMF_SetCell.
        """
        coors = nm.asarray(coors)
        if not coors.ndim in (2, 3):
            raise ValueError('coordinates must have 2 or 3 dimensions! (%d)'
                             % coors.ndim)
        if (coors.ndim == 2):
            # A single point set.
            base = self._eval_base(coors, diff=diff, ori=ori,
                                   suppress_errors=suppress_errors,
                                   eps=eps)
            if (base.ndim == 3) and force_axis:
                base = base[None, ...]
            if not base.flags['C_CONTIGUOUS']:
                base = nm.ascontiguousarray(base)
        else: # Several point sets.
            if diff:
                bdim = self.geometry.dim
            else:
                bdim = 1
            base = nm.empty((coors.shape[0], coors.shape[1],
                             bdim, self.n_nod), dtype=nm.float64)
            for ii, _coors in enumerate(coors):
                base[ii] = self._eval_base(_coors, diff=diff, ori=ori,
                                           suppress_errors=suppress_errors,
                                           eps=eps)
        if transform is not None:
            base = transform_basis(transform, base)
        return base
    def get_mtx_i(self):
        """
        Return the interpolation matrix inverse; subclasses may override.
        """
        return self.mtx_i
    def describe_nodes(self):
        """
        Return a :class:`NodeDescription` of this space's nodes.
        """
        return NodeDescription(self.nts, self.nodes)
class LagrangePolySpace(PolySpace):
    """
    Base class of the Lagrange polynomial spaces evaluating the basis via
    a C evaluation context (CLagrangeContext).
    """
    def create_context(self, cmesh, eps, check_errors, i_max, newton_eps,
                       tdim=None):
        """
        Create the C evaluation context, either bound to the mesh `cmesh`,
        or mesh-free - then `tdim` (topological dimension) must be given.

        Raises
        ------
        ValueError
            If both `cmesh` and `tdim` are None.
        """
        from sfepy.discrete.fem.extmods.bases import CLagrangeContext
        ref_coors = self.geometry.coors
        if cmesh is not None:
            mesh_coors = cmesh.coors
            conn = cmesh.get_conn(cmesh.tdim, 0)
            mesh_conn = conn.indices.reshape(cmesh.n_el, -1).astype(nm.int32)
            if tdim is None:
                tdim = cmesh.tdim
        else:
            mesh_coors = mesh_conn = None
            if tdim is None:
                raise ValueError('supply either cmesh or tdim!')
        ctx = CLagrangeContext(order=self.order,
                               tdim=tdim,
                               nodes=self.nodes,
                               ref_coors=ref_coors,
                               mesh_coors=mesh_coors,
                               mesh_conn=mesh_conn,
                               mtx_i=self.get_mtx_i(),
                               eps=eps,
                               check_errors=check_errors,
                               i_max=i_max,
                               newton_eps=newton_eps)
        return ctx
    def _eval_base(self, coors, diff=0, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See :func:`PolySpace.eval_base()`.
        """
        if diff == 2:
            # Second derivatives are computed by the subclasses' Python
            # code, not by the C context.
            base = self._eval_hessian(coors)
        else:
            base = self.eval_ctx.evaluate(coors, diff=diff,
                                          eps=eps,
                                          check_errors=not suppress_errors)
        return base
class LagrangeSimplexPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a simplex domain."""
    name = 'lagrange_simplex'
    def __init__(self, name, geometry, order, init_context=True):
        PolySpace.__init__(self, name, geometry, order)
        n_v = geometry.n_vertex
        # Barycentric coordinate matrix: bc = mtx_i . [coor, 1].
        mtx = nm.ones((n_v, n_v), nm.float64)
        mtx[0:n_v-1,:] = nm.transpose(geometry.coors)
        self.mtx_i = nm.ascontiguousarray(nla.inv(mtx))
        self.rhs = nm.ones((n_v,), nm.float64)
        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]
        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)
        else:
            self.eval_ctx = None
    def _define_nodes(self):
        """
        Define the node description (barycentric exponents `nodes`, node
        types `nts`) and node coordinates of the space.
        """
        # Factorial.
        fac = lambda n : reduce(lambda a, b : a * (b + 1), range(n), 1)
        geometry = self.geometry
        n_v, dim = geometry.n_vertex, geometry.dim
        order = self.order
        # Dimension of the complete polynomial space of the given order.
        n_nod = fac(order + dim) // (fac(order) * fac(dim))
        ## print n_nod, gd
        nodes = nm.zeros((n_nod, n_v), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)
        if order == 0:
            # A single constant (bubble) node.
            nts[0,:] = [3, 0]
            nodes[0,:] = nm.zeros((n_v,), nm.int32)
        else:
            iseq = 0
            # Vertex nodes.
            nts[0:n_v,0] = 0
            nts[0:n_v,1] = nm.arange(n_v, dtype = nm.int32)
            aux = order * nm.identity(n_v, dtype = nm.int32)
            nodes[iseq:iseq+n_v,:] = aux
            iseq += n_v
            if dim == 1:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 3,
                                                  [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
                                                  geometry.edges, order)
                iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 3,
                                                  [[0, 1, 2]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
                                                  geometry.edges, order)
                iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 2,
                                                  geometry.faces, order)
                iseq = LagrangeNodes.append_bubbles(nodes, nts, iseq, 3,
                                                    order)
            else:
                raise NotImplementedError
            ## print nm.concatenate((nts, nodes), 1)
        # Check orders.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order, orders))
        # Coordinates of the nodes.
        if order == 0:
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v
        else:
            node_coors = nm.dot(nodes, geometry.coors) / order
        return nodes, nts, node_coors
    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        # Barycentric coordinates of a point.
        def get_bc(coor):
            rhs = nm.concatenate((coor, [1]))
            bc = nm.dot(self.mtx_i, rhs)
            return bc
        # Product of the 1D factors of a shape function, skipping the
        # barycentric axes in `omit`.
        def get_val(bc, node, omit=[]):
            val = nm.ones(1, nm.float64)
            for i1 in range(bc.shape[0]):
                if i1 in omit: continue
                for i2 in range(node[i1]):
                    val *= (self.order * bc[i1] - i2) / (i2 + 1.0)
            return val
        # Derivative of the factor along a single barycentric axis,
        # skipping the factor indices in `omit`.
        def get_der(bc1, node1, omit=[]):
            val = nm.zeros(1, nm.float64)
            for i1 in range(node1):
                if i1 in omit: continue
                aux = nm.ones(1, nm.float64)
                for i2 in range(node1):
                    if (i1 == i2) or (i2 in omit): continue
                    aux *= (self.order * bc1 - i2) / (i2 + 1.0)
                val += aux * self.order / (i1 + 1.0)
            return val
        n_v = self.mtx_i.shape[0]
        dim = n_v - 1
        # Gradients of the barycentric coordinates w.r.t. real coordinates.
        mi = self.mtx_i[:, :dim]
        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)
        for ic, coor in enumerate(coors):
            bc = get_bc(coor)
            for ii, node in enumerate(self.nodes):
                for ig1, bc1 in enumerate(bc): # 1. derivative w.r.t. bc1.
                    for ig2, bc2 in enumerate(bc): # 2. derivative w.r.t. bc2.
                        if ig1 == ig2:
                            val = get_val(bc, node, omit=[ig1])
                            vv = 0.0
                            for i1 in range(node[ig1]):
                                aux = get_der(bc2, node[ig2], omit=[i1])
                                vv += aux * self.order / (i1 + 1.0)
                            val *= vv
                        else:
                            val = get_val(bc, node, omit=[ig1, ig2])
                            val *= get_der(bc1, node[ig1])
                            val *= get_der(bc2, node[ig2])
                        bfgg[ic, :, :, ii] += val * mi[ig1] * mi[ig2][:, None]
        return bfgg
class LagrangeSimplexBPolySpace(LagrangeSimplexPolySpace):
    """Lagrange polynomial space with forced bubble function on a simplex
    domain."""
    name = 'lagrange_simplex_bubble'
    def __init__(self, name, geometry, order, init_context=True):
        LagrangeSimplexPolySpace.__init__(self, name, geometry, order,
                                          init_context=False)
        nodes, nts, node_coors = self.nodes, self.nts, self.node_coors
        # Append one extra bubble node (type 3) to the parent's nodes.
        shape = [nts.shape[0] + 1, 2]
        nts = nm.resize(nts, shape)
        nts[-1,:] = [3, 0]
        shape = [nodes.shape[0] + 1, nodes.shape[1]]
        nodes = nm.resize(nodes, shape)
        # Make a 'hypercubic' (cubic in 2D) node.
        nodes[-1,:] = 1
        n_v = self.geometry.n_vertex
        tmp = nm.ones((n_v,), nm.int32)
        # The bubble node sits at the vertex average (element barycentre).
        node_coors = nm.vstack((node_coors,
                                nm.dot(tmp, self.geometry.coors) / n_v))
        self.nodes, self.nts = nodes, nts
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.bnode = nodes[-1:,:]
        self.n_nod = self.nodes.shape[0]
        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)
        else:
            self.eval_ctx = None
    def create_context(self, *args, **kwargs):
        """
        Create the C evaluation context with the bubble flag switched on.
        """
        ctx = LagrangePolySpace.create_context(self, *args, **kwargs)
        ctx.is_bubble = 1
        return ctx
class LagrangeTensorProductPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a tensor product domain."""
    name = 'lagrange_tensor_product'
    def __init__(self, name, geometry, order, init_context=True):
        """
        The space is built from an auxiliary 1D Lagrange simplex space
        `ps1d`, applied once per space dimension.
        """
        PolySpace.__init__(self, name, geometry, order)
        g1d = Struct(n_vertex = 2,
                     dim = 1,
                     coors = self.bbox[:,0:1].copy())
        self.ps1d = LagrangeSimplexPolySpace('P_aux', g1d, order,
                                             init_context=False)
        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]
        if init_context:
            # NOTE(review): int(sqrt(n_vertex)) is 2 for both a square (4)
            # and a hexahedron (8 vertices) -- confirm the intended tdim
            # semantics of the C context here.
            tdim = int(nm.sqrt(geometry.n_vertex))
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=tdim)
        else:
            self.eval_ctx = None
    def _define_nodes(self):
        """
        Define the node description and node coordinates of the space. Each
        row of `nodes` stores, per axis, the pair of the 1D barycentric
        exponents (order - i, i).
        """
        geometry = self.geometry
        order = self.order
        n_v, dim = geometry.n_vertex, geometry.dim
        vertex_map = order * nm.array(vertex_maps[dim], dtype=nm.int32)
        n_nod = (order + 1) ** dim
        nodes = nm.zeros((n_nod, 2 * dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)
        if order == 0:
            # A single constant (bubble) node.
            nts[0,:] = [3, 0]
            nodes[0,:] = nm.zeros((n_nod,), nm.int32)
        else:
            iseq = 0
            # Vertex nodes.
            nts[0:n_v,0] = 0
            nts[0:n_v,1] = nm.arange( n_v, dtype = nm.int32 )
            if dim == 3:
                for ii in range(n_v):
                    i1, i2, i3 = vertex_map[ii]
                    nodes[iseq,:] = [order - i1, i1,
                                     order - i2, i2,
                                     order - i3, i3]
                    iseq += 1
            elif dim == 2:
                for ii in range(n_v):
                    i1, i2 = vertex_map[ii]
                    nodes[iseq,:] = [order - i1, i1, order - i2, i2]
                    iseq += 1
            else:
                for ii in range(n_v):
                    i1 = vertex_map[ii][0]
                    nodes[iseq,:] = [order - i1, i1]
                    iseq += 1
            if dim == 1:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 3,
                                                     [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 3,
                                                     [[0, 1, 2, 3]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 2,
                                                     geometry.faces, order)
                iseq = LagrangeNodes.append_tp_bubbles(nodes, nts, iseq, 3,
                                                       order)
            else:
                raise NotImplementedError
        # Check orders.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order * dim):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order * dim, orders))
        # Coordinates of the nodes.
        if order == 0:
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v
        else:
            # Combine the per-axis exponent pairs with the bounding box
            # extrema.
            c_min, c_max = self.bbox[:,0]
            cr = nm.arange(2 * dim)
            node_coors = (nodes[:,cr[::2]] * c_min
                          + nodes[:,cr[1::2]] * c_max) / order
        return nodes, nts, node_coors
    def _eval_base_debug(self, coors, diff=False, ori=None,
                         suppress_errors=False, eps=1e-15):
        """Python version of eval_base()."""
        dim = self.geometry.dim
        ev = self.ps1d.eval_base
        if diff:
            base = nm.ones((coors.shape[0], dim, self.n_nod), dtype=nm.float64)
            for ii in range(dim):
                self.ps1d.nodes = self.nodes[:,2*ii:2*ii+2].copy()
                self.ps1d.n_nod = self.n_nod
                for iv in range(dim):
                    # Differentiate only the factor of the matching axis.
                    if ii == iv:
                        base[:,iv:iv+1,:] *= ev(coors[:,ii:ii+1].copy(),
                                                diff=True,
                                                suppress_errors=suppress_errors,
                                                eps=eps)
                    else:
                        base[:,iv:iv+1,:] *= ev(coors[:,ii:ii+1].copy(),
                                                diff=False,
                                                suppress_errors=suppress_errors,
                                                eps=eps)
        else:
            base = nm.ones((coors.shape[0], 1, self.n_nod), dtype=nm.float64)
            for ii in range(dim):
                self.ps1d.nodes = self.nodes[:,2*ii:2*ii+2].copy()
                self.ps1d.n_nod = self.n_nod
                base *= ev(coors[:,ii:ii+1].copy(),
                           diff=diff,
                           suppress_errors=suppress_errors,
                           eps=eps)
        return base
    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        evh = self.ps1d.eval_base
        dim = self.geometry.dim
        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)
        # Per axis: 1D values, first and second derivatives.
        v0s = []
        v1s = []
        v2s = []
        for ii in range(dim):
            self.ps1d.nodes = self.nodes[:,2*ii:2*ii+2].copy()
            self.ps1d.n_nod = self.n_nod
            ev = self.ps1d.create_context(None, 0, 1e-15, 100, 1e-8,
                                          tdim=1).evaluate
            v0s.append(ev(coors[:, ii:ii+1].copy())[:, 0, :])
            v1s.append(ev(coors[:, ii:ii+1].copy(), diff=1)[:, 0, :])
            v2s.append(evh(coors[:, ii:ii+1], diff=2)[:, 0, 0, :])
        for ir in range(dim):
            vv = v2s[ir] # Destroys v2s!
            for ik in range(dim):
                if ik == ir: continue
                vv *= v0s[ik]
            bfgg[:, ir, ir, :] = vv
            for ic in range(dim):
                if ic == ir: continue
                val = v1s[ir] * v1s[ic]
                for ik in range(dim):
                    if (ik == ir) or (ik == ic): continue
                    val *= v0s[ik]
                bfgg[:, ir, ic, :] += val
        return bfgg
    def get_mtx_i(self):
        """
        Return the interpolation matrix inverse of the auxiliary 1D space.
        """
        return self.ps1d.mtx_i
class LobattoTensorProductPolySpace(PolySpace):
"""
Hierarchical polynomial space using Lobatto functions.
Each row of the `nodes` attribute defines indices of Lobatto functions that
need to be multiplied together to evaluate the corresponding shape
function. This defines the ordering of basis functions on the reference
element.
"""
name = 'lobatto_tensor_product'
    def __init__(self, name, geometry, order):
        """
        Build the basis function descriptions, nominal node coordinates and
        per-function orders, and precompute index arrays of the edge and
        face functions used for orientation.
        """
        PolySpace.__init__(self, name, geometry, order)
        aux = self._define_nodes()
        self.nodes, self.nts, node_coors, self.face_axes, self.sfnodes = aux
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]
        # Order of each basis function = product of the indices of the 1D
        # Lobatto functions it is composed of (zeros replaced by ones).
        aux = nm.where(self.nodes > 0, self.nodes, 1)
        self.node_orders = nm.prod(aux, axis=1)
        # Indices of edge (node type 1) and face (node type 2) functions.
        self.edge_indx = nm.where(self.nts[:, 0] == 1)[0]
        self.face_indx = nm.where(self.nts[:, 0] == 2)[0]
        self.face_axes_nodes = self._get_face_axes_nodes(self.face_axes)
    def _get_counts(self):
        """
        Return the numbers of basis functions: total, per edge, per face
        and bubble (interior), for the current order and space dimension.
        """
        order = self.order
        dim = self.geometry.dim
        n_nod = (order + 1) ** dim
        n_per_edge = (order - 1)
        n_per_face = (order - 1) ** (dim - 1)
        n_bubble = (order - 1) ** dim
        return n_nod, n_per_edge, n_per_face, n_bubble
def _define_nodes(self):
geometry = self.geometry
order = self.order
n_v, dim = geometry.n_vertex, geometry.dim
n_nod, n_per_edge, n_per_face, n_bubble = self._get_counts()
nodes = nm.zeros((n_nod, dim), nm.int32)
nts = nm.zeros((n_nod, 2), nm.int32)
# Vertex nodes.
nts[0:n_v, 0] = 0
nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
nodes[0:n_v] = nm.array(vertex_maps[dim], dtype=nm.int32)
ii = n_v
# Edge nodes.
if (dim > 1) and (n_per_edge > 0):
ik = nm.arange(2, order + 1, dtype=nm.int32)
zo = nm.zeros((n_per_edge, 2), dtype=nm.int32)
zo[:, 1] = 1
for ie, edge in enumerate(geometry.edges):
n1, n2 = nodes[edge]
ifix = nm.where(n1 == n2)[0]
irun = nm.where(n1 != n2)[0][0]
ic = n1[ifix]
nodes[ii:ii + n_per_edge, ifix] = zo[:, ic]
nodes[ii:ii + n_per_edge, irun] = ik
nts[ii:ii + n_per_edge] = [[1, ie]]
ii += n_per_edge
# 3D face nodes.
face_axes = []
sfnodes = None
if (dim == 3) and (n_per_face > 0):
n_face = len(geometry.faces)
sfnodes = nm.zeros((n_per_face * n_face, dim), nm.int32)
ii0 = ii
ik = nm.arange(2, order + 1, dtype=nm.int32)
zo = nm.zeros((n_per_face, 2), dtype=nm.int32)
zo[:, 1] = 1
for ifa, face in enumerate(geometry.faces):
ns = nodes[face]
diff = nm.diff(ns, axis=0)
asum = nm.abs(diff).sum(axis=0)
ifix = nm.where(asum == 0)[0][0]
ic = ns[0, ifix]
irun1 = nm.where(asum == 2)[0][0]
irun2 = nm.where(asum == 1)[0][0]
iy, ix = nm.meshgrid(ik, ik)
nodes[ii:ii + n_per_face, ifix] = zo[:, ic]
nodes[ii:ii + n_per_face, irun1] = ix.ravel()
nodes[ii:ii + n_per_face, irun2] = iy.ravel()
nts[ii:ii + n_per_face] = [[2, ifa]]
ij = ii - ii0
sfnodes[ij:ij + n_per_face, ifix] = zo[:, ic]
sfnodes[ij:ij + n_per_face, irun1] = iy.ravel()
sfnodes[ij:ij + n_per_face, irun2] = ix.ravel()
face_axes.append([irun1, irun2])
ii += n_per_face
face_axes = nm.array(face_axes)
# Bubble nodes.
if n_bubble > 0:
ik = nm.arange(2, order + 1, dtype=nm.int32)
nodes[ii:] = nm.array([aux for aux in | combine([ik] * dim) | sfepy.linalg.combine |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
"""Explicit time stepping solver with multistage solve_step method"""
__metaclass__ = SolverMeta
name = "ts.multistaged"
_parameters = [
('t0', 'float', 0.0, False,
'The initial time.'),
('t1', 'float', 1.0, False,
'The final time.'),
('dt', 'float', None, False,
'The time step. Used if `n_step` is not given.'),
('n_step', 'int', 10, False,
'The number of time steps. Has precedence over `dt`.'),
# this option is required by TimeSteppingSolver constructor
('quasistatic', 'bool', False, False,
"""If True, assume a quasistatic time-stepping. Then the non-linear
solver is invoked also for the initial time."""),
('limiters', 'dictionary', None, None,
"Limiters for DGFields, keys: field name, values: limiter class"),
]
def __init__(self, conf, nls=None, context=None, **kwargs):
TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
**kwargs)
self.ts = | TimeStepper.from_conf(self.conf) | sfepy.solvers.ts.TimeStepper.from_conf |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
"""Explicit time stepping solver with multistage solve_step method"""
__metaclass__ = SolverMeta
name = "ts.multistaged"
_parameters = [
('t0', 'float', 0.0, False,
'The initial time.'),
('t1', 'float', 1.0, False,
'The final time.'),
('dt', 'float', None, False,
'The time step. Used if `n_step` is not given.'),
('n_step', 'int', 10, False,
'The number of time steps. Has precedence over `dt`.'),
# this option is required by TimeSteppingSolver constructor
('quasistatic', 'bool', False, False,
"""If True, assume a quasistatic time-stepping. Then the non-linear
solver is invoked also for the initial time."""),
('limiters', 'dictionary', None, None,
"Limiters for DGFields, keys: field name, values: limiter class"),
]
    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Set up the time stepper, the progress message formats and the
        post-stage hook (a limiter, if configured).
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)
        self.ts = TimeStepper.from_conf(self.conf)
        # Zero-padded width of step numbers in progress messages.
        nd = self.ts.n_digit
        self.stage_format = '---- ' + \
                            self.name + ' stage {}: linear system sol error {}'+ \
                            ' ----'
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose
        # Identity hook unless limiters are configured below.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters
        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field takes effect -- confirm single-field use.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)
def solve_step0(self, nls, vec0):
res = nls.fun(vec0)
err = nm.linalg.norm(res)
| output('initial residual: %e' % err, verbose=self.verbose) | sfepy.base.base.output |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
"""Explicit time stepping solver with multistage solve_step method"""
__metaclass__ = SolverMeta
name = "ts.multistaged"
_parameters = [
('t0', 'float', 0.0, False,
'The initial time.'),
('t1', 'float', 1.0, False,
'The final time.'),
('dt', 'float', None, False,
'The time step. Used if `n_step` is not given.'),
('n_step', 'int', 10, False,
'The number of time steps. Has precedence over `dt`.'),
# this option is required by TimeSteppingSolver constructor
('quasistatic', 'bool', False, False,
"""If True, assume a quasistatic time-stepping. Then the non-linear
solver is invoked also for the initial time."""),
('limiters', 'dictionary', None, None,
"Limiters for DGFields, keys: field name, values: limiter class"),
]
    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Set up the time stepper, the progress message formats and the
        post-stage hook (a limiter, if configured).
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)
        self.ts = TimeStepper.from_conf(self.conf)
        # Zero-padded width of step numbers in progress messages.
        nd = self.ts.n_digit
        self.stage_format = '---- ' + \
                            self.name + ' stage {}: linear system sol error {}'+ \
                            ' ----'
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose
        # Identity hook unless limiters are configured below.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters
        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field takes effect -- confirm single-field use.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)
def solve_step0(self, nls, vec0):
res = nls.fun(vec0)
err = nm.linalg.norm(res)
output('initial residual: %e' % err, verbose=self.verbose)
vec = vec0.copy()
return vec
    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """
        Advance the solution by one time step; abstract - implemented by
        the multistage subclasses.
        """
        raise NotImplementedError("Called abstract solver, use subclass.")
    def output_step_info(self, ts):
        """
        Print the current time and step number using ``self.format``.
        """
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)
@standard_ts_call
def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
poststep_fun=None, status=None):
"""
Solve the time-dependent problem.
"""
ts = self.ts
nls = | get_default(nls, self.nls) | sfepy.base.base.get_default |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
    """
    Explicit time stepping solver with multistage solve_step method.

    Base class for explicit (Runge-Kutta like) steppers used with DG FEM;
    subclasses implement `solve_step()`. After each stage the
    `post_stage_hook` built from the configured limiters is applied to the
    solution vector.
    """
    __metaclass__ = SolverMeta
    name = "ts.multistaged"

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        # this option is required by TimeSteppingSolver constructor
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
        ('limiters', 'dictionary', None, None,
         "Limiters for DGFields, keys: field name, values: limiter class"),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Create the time stepper and output formats; build the post-stage
        hook from the limiters configured for `context` fields.
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = TimeStepper.from_conf(self.conf)

        nd = self.ts.n_digit
        # Per-stage report template, filled by str.format() in subclasses.
        self.stage_format = '---- ' + \
            self.name + ' stage {}: linear system sol error {}' + \
            ' ----'
        # Pre-render the '%e' / '%<nd>d' placeholders of the step header.
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose

        # Identity hook used when no limiters are configured.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters

        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field is applied -- confirm this is intended
        # for multi-field setups.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)

    def solve_step0(self, nls, vec0):
        """
        Handle the initial "step": report the residual norm of the initial
        state and return its copy; no update is performed.
        """
        res = nls.fun(vec0)
        err = nm.linalg.norm(res)
        output('initial residual: %e' % err, verbose=self.verbose)
        vec = vec0.copy()

        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """Advance `vec` by one time step; implemented in subclasses."""
        raise NotImplementedError("Called abstract solver, use subclass.")

    def output_step_info(self, ts):
        """Print the current time and step counter using `self.format`."""
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # The initial time: evaluate and store the initial state only.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
                                   status)

            poststep_fun(ts, vect)

            vec = vect

        return vec
class EulerStepSolver(DGMultiStageTSS):
"""Simple forward euler method"""
name = 'ts.euler'
__metaclass__ = SolverMeta
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Euler solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = | get_default(ls_eps_a, 1.0) | sfepy.base.base.get_default |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
    """
    Explicit time stepping solver with multistage solve_step method.

    Base class for explicit (Runge-Kutta like) steppers used with DG FEM;
    subclasses implement `solve_step()`. After each stage the
    `post_stage_hook` built from the configured limiters is applied to the
    solution vector.
    """
    __metaclass__ = SolverMeta
    name = "ts.multistaged"

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        # this option is required by TimeSteppingSolver constructor
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
        ('limiters', 'dictionary', None, None,
         "Limiters for DGFields, keys: field name, values: limiter class"),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Create the time stepper and output formats; build the post-stage
        hook from the limiters configured for `context` fields.
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = TimeStepper.from_conf(self.conf)

        nd = self.ts.n_digit
        # Per-stage report template, filled by str.format() in subclasses.
        self.stage_format = '---- ' + \
            self.name + ' stage {}: linear system sol error {}' + \
            ' ----'
        # Pre-render the '%e' / '%<nd>d' placeholders of the step header.
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose

        # Identity hook used when no limiters are configured.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters

        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field is applied -- confirm this is intended
        # for multi-field setups.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)

    def solve_step0(self, nls, vec0):
        """
        Handle the initial "step": report the residual norm of the initial
        state and return its copy; no update is performed.
        """
        res = nls.fun(vec0)
        err = nm.linalg.norm(res)
        output('initial residual: %e' % err, verbose=self.verbose)
        vec = vec0.copy()

        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """Advance `vec` by one time step; implemented in subclasses."""
        raise NotImplementedError("Called abstract solver, use subclass.")

    def output_step_info(self, ts):
        """Print the current time and step counter using `self.format`."""
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # The initial time: evaluate and store the initial state only.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
                                   status)

            poststep_fun(ts, vect)

            vec = vect

        return vec
class EulerStepSolver(DGMultiStageTSS):
"""Simple forward euler method"""
name = 'ts.euler'
__metaclass__ = SolverMeta
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Euler solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = | get_default(ls_eps_r, 1.0) | sfepy.base.base.get_default |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
    """
    Explicit time stepping solver with multistage solve_step method.

    Base class for explicit (Runge-Kutta like) steppers used with DG FEM;
    subclasses implement `solve_step()`. After each stage the
    `post_stage_hook` built from the configured limiters is applied to the
    solution vector.
    """
    __metaclass__ = SolverMeta
    name = "ts.multistaged"

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        # this option is required by TimeSteppingSolver constructor
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
        ('limiters', 'dictionary', None, None,
         "Limiters for DGFields, keys: field name, values: limiter class"),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Create the time stepper and output formats; build the post-stage
        hook from the limiters configured for `context` fields.
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = TimeStepper.from_conf(self.conf)

        nd = self.ts.n_digit
        # Per-stage report template, filled by str.format() in subclasses.
        self.stage_format = '---- ' + \
            self.name + ' stage {}: linear system sol error {}' + \
            ' ----'
        # Pre-render the '%e' / '%<nd>d' placeholders of the step header.
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose

        # Identity hook used when no limiters are configured.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters

        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field is applied -- confirm this is intended
        # for multi-field setups.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)

    def solve_step0(self, nls, vec0):
        """
        Handle the initial "step": report the residual norm of the initial
        state and return its copy; no update is performed.
        """
        res = nls.fun(vec0)
        err = nm.linalg.norm(res)
        output('initial residual: %e' % err, verbose=self.verbose)
        vec = vec0.copy()

        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """Advance `vec` by one time step; implemented in subclasses."""
        raise NotImplementedError("Called abstract solver, use subclass.")

    def output_step_info(self, ts):
        """Print the current time and step counter using `self.format`."""
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # The initial time: evaluate and store the initial state only.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
                                   status)

            poststep_fun(ts, vect)

            vec = vect

        return vec
class EulerStepSolver(DGMultiStageTSS):
    """Simple forward euler method"""
    name = 'ts.euler'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Perform a single forward Euler update of `vec_x0`.

        The linear system for the increment is solved by the nonlinear
        solver's linear solver; the updated state is passed through
        `post_stage_hook` before being returned.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Euler solver")

        residual_fun = nls.fun
        jacobian_fun = nls.fun_grad
        solve_linear = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        tol_a, tol_r = solve_linear.get_tolerance()
        eps_a = get_default(tol_a, 1.0)
        eps_r = get_default(tol_r, 1.0)

        state = vec_x0.copy()

        rhs = residual_fun(state)
        mtx = jacobian_fun(state)
        ls_status = {}
        increment = solve_linear(rhs, x0=state,
                                 eps_a=eps_a, eps_r=eps_r, mtx=mtx,
                                 status=ls_status)

        # Residual of the linear solve, for reporting only.
        solve_err = nla.norm(mtx * increment - rhs)
        if self.verbose:
            output(self.name + ' linear system sol error {}'.format(solve_err))
            output(self.name + ' mtx max {}, min {}, trace {}'
                   .format(mtx.max(), mtx.min(), nm.sum(mtx.diagonal())))

        state = state - ts.dt * (increment - state)

        return self.post_stage_hook(state)
class TVDRK3StepSolver(DGMultiStageTSS):
    r"""3rd order Total Variation Diminishing Runge-Kutta method
    based on [1]_

    .. math::
        \begin{aligned}
        \mathbf{p}^{(1)} &= \mathbf{p}^n - \Delta t
        \bar{\mathcal{L}}(\mathbf{p}^n),\\
        \mathbf{\mathbf{p}}^{(2)} &= \frac{3}{4}\mathbf{p}^n
        +\frac{1}{4}\mathbf{p}^{(1)} - \frac{1}{4}\Delta t
        \bar{\mathcal{L}}(\mathbf{p}^{(1)}),\\
        \mathbf{p}^{(n+1)} &= \frac{1}{3}\mathbf{p}^n
        +\frac{2}{3}\mathbf{p}^{(2)} - \frac{2}{3}\Delta t
        \bar{\mathcal{L}}(\mathbf{p}^{(2)}).
        \end{aligned}

    .. [1] Gottlieb, S., & Shu, C.-W. (1998). Total variation diminishing
       Runge-Kutta schemes. Mathematics of Computation of the American
       Mathematical Society, 67(221), 73–85.
       https://doi.org/10.1090/s0025-5718-98-00913-2
    """
    name = 'ts.tvd_runge_kutta_3'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Advance `vec_x0` by one step of the three-stage TVD RK3 scheme;
        `post_stage_hook` is applied after every stage.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")

        fun = nls.fun
        fun_grad = nls.fun_grad
        lin_solver = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
        eps_a = get_default(ls_eps_a, 1.0)
        eps_r = get_default(ls_eps_r, 1.0)
        ls_status = {}

        # ----1st stage----
        vec_x = vec_x0.copy()
        vec_r = fun(vec_x)
        mtx_a = fun_grad(vec_x)
        vec_dx = lin_solver(vec_r, x0=vec_x,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x1 = vec_x - ts.dt * (vec_dx - vec_x)

        # Residual of the linear solve, for reporting only.
        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(1, lerr))

        vec_x1 = self.post_stage_hook(vec_x1)
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
    """
    Explicit time stepping solver with multistage solve_step method.

    Base class for explicit (Runge-Kutta like) steppers used with DG FEM;
    subclasses implement `solve_step()`. After each stage the
    `post_stage_hook` built from the configured limiters is applied to the
    solution vector.
    """
    __metaclass__ = SolverMeta
    name = "ts.multistaged"

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        # this option is required by TimeSteppingSolver constructor
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
        ('limiters', 'dictionary', None, None,
         "Limiters for DGFields, keys: field name, values: limiter class"),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Create the time stepper and output formats; build the post-stage
        hook from the limiters configured for `context` fields.
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = TimeStepper.from_conf(self.conf)

        nd = self.ts.n_digit
        # Per-stage report template, filled by str.format() in subclasses.
        self.stage_format = '---- ' + \
            self.name + ' stage {}: linear system sol error {}' + \
            ' ----'
        # Pre-render the '%e' / '%<nd>d' placeholders of the step header.
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose

        # Identity hook used when no limiters are configured.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters

        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field is applied -- confirm this is intended
        # for multi-field setups.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)

    def solve_step0(self, nls, vec0):
        """
        Handle the initial "step": report the residual norm of the initial
        state and return its copy; no update is performed.
        """
        res = nls.fun(vec0)
        err = nm.linalg.norm(res)
        output('initial residual: %e' % err, verbose=self.verbose)
        vec = vec0.copy()

        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """Advance `vec` by one time step; implemented in subclasses."""
        raise NotImplementedError("Called abstract solver, use subclass.")

    def output_step_info(self, ts):
        """Print the current time and step counter using `self.format`."""
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # The initial time: evaluate and store the initial state only.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
                                   status)

            poststep_fun(ts, vect)

            vec = vect

        return vec
class EulerStepSolver(DGMultiStageTSS):
    """Simple forward euler method"""
    name = 'ts.euler'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Perform a single forward Euler update of `vec_x0`.

        The linear system for the increment is solved by the nonlinear
        solver's linear solver; the updated state is passed through
        `post_stage_hook` before being returned.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Euler solver")

        residual_fun = nls.fun
        jacobian_fun = nls.fun_grad
        solve_linear = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        tol_a, tol_r = solve_linear.get_tolerance()
        eps_a = get_default(tol_a, 1.0)
        eps_r = get_default(tol_r, 1.0)

        state = vec_x0.copy()

        rhs = residual_fun(state)
        mtx = jacobian_fun(state)
        ls_status = {}
        increment = solve_linear(rhs, x0=state,
                                 eps_a=eps_a, eps_r=eps_r, mtx=mtx,
                                 status=ls_status)

        # Residual of the linear solve, for reporting only.
        solve_err = nla.norm(mtx * increment - rhs)
        if self.verbose:
            output(self.name + ' linear system sol error {}'.format(solve_err))
            output(self.name + ' mtx max {}, min {}, trace {}'
                   .format(mtx.max(), mtx.min(), nm.sum(mtx.diagonal())))

        state = state - ts.dt * (increment - state)

        return self.post_stage_hook(state)
class TVDRK3StepSolver(DGMultiStageTSS):
r"""3rd order Total Variation Diminishing Runge-Kutta method
based on [1]_
.. math::
\begin{aligned}
\mathbf{p}^{(1)} &= \mathbf{p}^n - \Delta t
\bar{\mathcal{L}}(\mathbf{p}^n),\\
\mathbf{\mathbf{p}}^{(2)} &= \frac{3}{4}\mathbf{p}^n
+\frac{1}{4}\mathbf{p}^{(1)} - \frac{1}{4}\Delta t
\bar{\mathcal{L}}(\mathbf{p}^{(1)}),\\
\mathbf{p}^{(n+1)} &= \frac{1}{3}\mathbf{p}^n
+\frac{2}{3}\mathbf{p}^{(2)} - \frac{2}{3}\Delta t
\bar{\mathcal{L}}(\mathbf{p}^{(2)}).
\end{aligned}
.. [1] <NAME>., & <NAME>. (2002). Total variation diminishing Runge-Kutta
schemes. Mathematics of Computation of the American Mathematical Society,
67(221), 73–85. https://doi.org/10.1090/s0025-5718-98-00913-2
"""
name = 'ts.tvd_runge_kutta_3'
__metaclass__ = SolverMeta
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = | get_default(ls_eps_r, 1.0) | sfepy.base.base.get_default |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
    """
    Explicit time stepping solver with multistage solve_step method.

    Base class for explicit (Runge-Kutta like) steppers used with DG FEM;
    subclasses implement `solve_step()`. After each stage the
    `post_stage_hook` built from the configured limiters is applied to the
    solution vector.
    """
    __metaclass__ = SolverMeta
    name = "ts.multistaged"

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        # this option is required by TimeSteppingSolver constructor
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
        ('limiters', 'dictionary', None, None,
         "Limiters for DGFields, keys: field name, values: limiter class"),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Create the time stepper and output formats; build the post-stage
        hook from the limiters configured for `context` fields.
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = TimeStepper.from_conf(self.conf)

        nd = self.ts.n_digit
        # Per-stage report template, filled by str.format() in subclasses.
        self.stage_format = '---- ' + \
            self.name + ' stage {}: linear system sol error {}' + \
            ' ----'
        # Pre-render the '%e' / '%<nd>d' placeholders of the step header.
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose

        # Identity hook used when no limiters are configured.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters

        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field is applied -- confirm this is intended
        # for multi-field setups.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)

    def solve_step0(self, nls, vec0):
        """
        Handle the initial "step": report the residual norm of the initial
        state and return its copy; no update is performed.
        """
        res = nls.fun(vec0)
        err = nm.linalg.norm(res)
        output('initial residual: %e' % err, verbose=self.verbose)
        vec = vec0.copy()

        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """Advance `vec` by one time step; implemented in subclasses."""
        raise NotImplementedError("Called abstract solver, use subclass.")

    def output_step_info(self, ts):
        """Print the current time and step counter using `self.format`."""
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # The initial time: evaluate and store the initial state only.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
                                   status)

            poststep_fun(ts, vect)

            vec = vect

        return vec
class EulerStepSolver(DGMultiStageTSS):
    """Simple forward euler method"""
    name = 'ts.euler'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Perform a single forward Euler update of `vec_x0`.

        The linear system for the increment is solved by the nonlinear
        solver's linear solver; the updated state is passed through
        `post_stage_hook` before being returned.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Euler solver")

        residual_fun = nls.fun
        jacobian_fun = nls.fun_grad
        solve_linear = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        tol_a, tol_r = solve_linear.get_tolerance()
        eps_a = get_default(tol_a, 1.0)
        eps_r = get_default(tol_r, 1.0)

        state = vec_x0.copy()

        rhs = residual_fun(state)
        mtx = jacobian_fun(state)
        ls_status = {}
        increment = solve_linear(rhs, x0=state,
                                 eps_a=eps_a, eps_r=eps_r, mtx=mtx,
                                 status=ls_status)

        # Residual of the linear solve, for reporting only.
        solve_err = nla.norm(mtx * increment - rhs)
        if self.verbose:
            output(self.name + ' linear system sol error {}'.format(solve_err))
            output(self.name + ' mtx max {}, min {}, trace {}'
                   .format(mtx.max(), mtx.min(), nm.sum(mtx.diagonal())))

        state = state - ts.dt * (increment - state)

        return self.post_stage_hook(state)
class TVDRK3StepSolver(DGMultiStageTSS):
    r"""3rd order Total Variation Diminishing Runge-Kutta method
    based on [1]_

    .. math::
        \begin{aligned}
        \mathbf{p}^{(1)} &= \mathbf{p}^n - \Delta t
        \bar{\mathcal{L}}(\mathbf{p}^n),\\
        \mathbf{\mathbf{p}}^{(2)} &= \frac{3}{4}\mathbf{p}^n
        +\frac{1}{4}\mathbf{p}^{(1)} - \frac{1}{4}\Delta t
        \bar{\mathcal{L}}(\mathbf{p}^{(1)}),\\
        \mathbf{p}^{(n+1)} &= \frac{1}{3}\mathbf{p}^n
        +\frac{2}{3}\mathbf{p}^{(2)} - \frac{2}{3}\Delta t
        \bar{\mathcal{L}}(\mathbf{p}^{(2)}).
        \end{aligned}

    .. [1] Gottlieb, S., & Shu, C.-W. (1998). Total variation diminishing
       Runge-Kutta schemes. Mathematics of Computation of the American
       Mathematical Society, 67(221), 73–85.
       https://doi.org/10.1090/s0025-5718-98-00913-2
    """
    name = 'ts.tvd_runge_kutta_3'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Advance `vec_x0` by one step of the three-stage TVD RK3 scheme;
        `post_stage_hook` is applied after every stage.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")

        fun = nls.fun
        fun_grad = nls.fun_grad
        lin_solver = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
        eps_a = get_default(ls_eps_a, 1.0)
        eps_r = get_default(ls_eps_r, 1.0)
        ls_status = {}

        # ----1st stage----
        vec_x = vec_x0.copy()
        vec_r = fun(vec_x)
        mtx_a = fun_grad(vec_x)
        vec_dx = lin_solver(vec_r, x0=vec_x,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x1 = vec_x - ts.dt * (vec_dx - vec_x)

        # Residual of the linear solve, for reporting only.
        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(1, lerr))

        vec_x1 = self.post_stage_hook(vec_x1)

        # ----2nd stage----
        vec_r = fun(vec_x1)
        mtx_a = fun_grad(vec_x1)
        vec_dx = lin_solver(vec_r, x0=vec_x1,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x2 = (3 * vec_x + vec_x1 - ts.dt * (vec_dx - vec_x1)) / 4

        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(2, lerr))

        vec_x2 = self.post_stage_hook(vec_x2)

        # ----3rd stage-----
        # Re-evaluate time-dependent data at the substep time before the
        # final stage -- presumably t + dt/2; TODO confirm against
        # TimeStepper.set_substep_time().
        ts.set_substep_time(1. / 2. * ts.dt)
        prestep_fun(ts, vec_x2)
        vec_r = fun(vec_x2)
        mtx_a = fun_grad(vec_x2)
        vec_dx = lin_solver(vec_r, x0=vec_x2,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x3 = (vec_x + 2 * vec_x2 - 2 * ts.dt * (vec_dx - vec_x2)) / 3

        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(3, lerr))

        vec_x3 = self.post_stage_hook(vec_x3)

        return vec_x3
class RK4StepSolver(DGMultiStageTSS):
"""Classical 4th order Runge-Kutta method,
implemetantions is based on [1]_
.. [1] <NAME>., & <NAME>. (2008). Nodal Discontinuous Galerkin Methods.
Journal of Physics A: Mathematical and Theoretical (Vol. 54). New York,
NY: Springer New York. http://doi.org/10.1007/978-0-387-72067-8, p. 63
"""
name = 'ts.runge_kutta_4'
__metaclass__ = SolverMeta
stage_updates = (
lambda u, k_, dt: u,
lambda u, k1, dt: u + 1. / 2. * dt * k1,
lambda u, k2, dt: u + 1. / 2. * dt * k2,
lambda u, k3, dt: u + dt * k3
)
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = | get_default(ls_eps_a, 1.0) | sfepy.base.base.get_default |
"""
Explicit time stepping solvers for use with DG FEM
"""
import numpy as nm
import numpy.linalg as nla
# sfepy imports
from sfepy.base.base import get_default, output
from sfepy.solvers import TimeSteppingSolver
from sfepy.solvers.solvers import SolverMeta
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers.ts_solvers import standard_ts_call
class DGMultiStageTSS(TimeSteppingSolver):
    """
    Explicit time stepping solver with multistage solve_step method.

    Base class for explicit (Runge-Kutta like) steppers used with DG FEM;
    subclasses implement `solve_step()`. After each stage the
    `post_stage_hook` built from the configured limiters is applied to the
    solution vector.
    """
    __metaclass__ = SolverMeta
    name = "ts.multistaged"

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        # this option is required by TimeSteppingSolver constructor
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
        ('limiters', 'dictionary', None, None,
         "Limiters for DGFields, keys: field name, values: limiter class"),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """
        Create the time stepper and output formats; build the post-stage
        hook from the limiters configured for `context` fields.
        """
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = TimeStepper.from_conf(self.conf)

        nd = self.ts.n_digit
        # Per-stage report template, filled by str.format() in subclasses.
        self.stage_format = '---- ' + \
            self.name + ' stage {}: linear system sol error {}' + \
            ' ----'
        # Pre-render the '%e' / '%<nd>d' placeholders of the step header.
        format = '\n\n====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)
        self.format = format
        self.verbose = self.conf.verbose

        # Identity hook used when no limiters are configured.
        self.post_stage_hook = lambda x: x
        limiters = {}
        if self.conf.limiters is not None:
            limiters = self.conf.limiters

        # what if we have more fields or limiters?
        # NOTE(review): each iteration overwrites the hook, so only the
        # limiter of the last field is applied -- confirm this is intended
        # for multi-field setups.
        for field_name, limiter in limiters.items():
            self.post_stage_hook = limiter(context.fields[field_name],
                                           verbose=self.verbose)

    def solve_step0(self, nls, vec0):
        """
        Handle the initial "step": report the residual norm of the initial
        state and return its copy; no update is performed.
        """
        res = nls.fun(vec0)
        err = nm.linalg.norm(res)
        output('initial residual: %e' % err, verbose=self.verbose)
        vec = vec0.copy()

        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None, poststep_fun=None,
                   status=None):
        """Advance `vec` by one time step; implemented in subclasses."""
        raise NotImplementedError("Called abstract solver, use subclass.")

    def output_step_info(self, ts):
        """Print the current time and step counter using `self.format`."""
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # The initial time: evaluate and store the initial state only.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun, poststep_fun,
                                   status)

            poststep_fun(ts, vect)

            vec = vect

        return vec
class EulerStepSolver(DGMultiStageTSS):
    """Simple forward euler method"""
    name = 'ts.euler'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Perform a single forward Euler update of `vec_x0`.

        The linear system for the increment is solved by the nonlinear
        solver's linear solver; the updated state is passed through
        `post_stage_hook` before being returned.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Euler solver")

        residual_fun = nls.fun
        jacobian_fun = nls.fun_grad
        solve_linear = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        tol_a, tol_r = solve_linear.get_tolerance()
        eps_a = get_default(tol_a, 1.0)
        eps_r = get_default(tol_r, 1.0)

        state = vec_x0.copy()

        rhs = residual_fun(state)
        mtx = jacobian_fun(state)
        ls_status = {}
        increment = solve_linear(rhs, x0=state,
                                 eps_a=eps_a, eps_r=eps_r, mtx=mtx,
                                 status=ls_status)

        # Residual of the linear solve, for reporting only.
        solve_err = nla.norm(mtx * increment - rhs)
        if self.verbose:
            output(self.name + ' linear system sol error {}'.format(solve_err))
            output(self.name + ' mtx max {}, min {}, trace {}'
                   .format(mtx.max(), mtx.min(), nm.sum(mtx.diagonal())))

        state = state - ts.dt * (increment - state)

        return self.post_stage_hook(state)
class TVDRK3StepSolver(DGMultiStageTSS):
    r"""3rd order Total Variation Diminishing Runge-Kutta method
    based on [1]_

    .. math::
        \begin{aligned}
        \mathbf{p}^{(1)} &= \mathbf{p}^n - \Delta t
        \bar{\mathcal{L}}(\mathbf{p}^n),\\
        \mathbf{\mathbf{p}}^{(2)} &= \frac{3}{4}\mathbf{p}^n
        +\frac{1}{4}\mathbf{p}^{(1)} - \frac{1}{4}\Delta t
        \bar{\mathcal{L}}(\mathbf{p}^{(1)}),\\
        \mathbf{p}^{(n+1)} &= \frac{1}{3}\mathbf{p}^n
        +\frac{2}{3}\mathbf{p}^{(2)} - \frac{2}{3}\Delta t
        \bar{\mathcal{L}}(\mathbf{p}^{(2)}).
        \end{aligned}

    .. [1] Gottlieb, S., & Shu, C.-W. (1998). Total variation diminishing
       Runge-Kutta schemes. Mathematics of Computation of the American
       Mathematical Society, 67(221), 73–85.
       https://doi.org/10.1090/s0025-5718-98-00913-2
    """
    name = 'ts.tvd_runge_kutta_3'
    __metaclass__ = SolverMeta

    def solve_step(self, ts, nls, vec_x0, status=None,
                   prestep_fun=None, poststep_fun=None):
        """
        Advance `vec_x0` by one step of the three-stage TVD RK3 scheme;
        `post_stage_hook` is applied after every stage.
        """
        if ts is None:
            raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")

        fun = nls.fun
        fun_grad = nls.fun_grad
        lin_solver = nls.lin_solver

        # Fall back to 1.0 when the linear solver defines no tolerances.
        ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
        eps_a = get_default(ls_eps_a, 1.0)
        eps_r = get_default(ls_eps_r, 1.0)
        ls_status = {}

        # ----1st stage----
        vec_x = vec_x0.copy()
        vec_r = fun(vec_x)
        mtx_a = fun_grad(vec_x)
        vec_dx = lin_solver(vec_r, x0=vec_x,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x1 = vec_x - ts.dt * (vec_dx - vec_x)

        # Residual of the linear solve, for reporting only.
        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(1, lerr))

        vec_x1 = self.post_stage_hook(vec_x1)

        # ----2nd stage----
        vec_r = fun(vec_x1)
        mtx_a = fun_grad(vec_x1)
        vec_dx = lin_solver(vec_r, x0=vec_x1,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x2 = (3 * vec_x + vec_x1 - ts.dt * (vec_dx - vec_x1)) / 4

        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(2, lerr))

        vec_x2 = self.post_stage_hook(vec_x2)

        # ----3rd stage-----
        # Re-evaluate time-dependent data at the substep time before the
        # final stage -- presumably t + dt/2; TODO confirm against
        # TimeStepper.set_substep_time().
        ts.set_substep_time(1. / 2. * ts.dt)
        prestep_fun(ts, vec_x2)
        vec_r = fun(vec_x2)
        mtx_a = fun_grad(vec_x2)
        vec_dx = lin_solver(vec_r, x0=vec_x2,
                            eps_a=eps_a, eps_r=eps_r, mtx=mtx_a,
                            status=ls_status)

        vec_x3 = (vec_x + 2 * vec_x2 - 2 * ts.dt * (vec_dx - vec_x2)) / 3

        vec_e = mtx_a * vec_dx - vec_r
        lerr = nla.norm(vec_e)
        if self.verbose:
            output(self.stage_format.format(3, lerr))

        vec_x3 = self.post_stage_hook(vec_x3)

        return vec_x3
class RK4StepSolver(DGMultiStageTSS):
"""Classical 4th order Runge-Kutta method,
implemetantions is based on [1]_
.. [1] <NAME>., & <NAME>. (2008). Nodal Discontinuous Galerkin Methods.
Journal of Physics A: Mathematical and Theoretical (Vol. 54). New York,
NY: Springer New York. http://doi.org/10.1007/978-0-387-72067-8, p. 63
"""
name = 'ts.runge_kutta_4'
__metaclass__ = SolverMeta
stage_updates = (
lambda u, k_, dt: u,
lambda u, k1, dt: u + 1. / 2. * dt * k1,
lambda u, k2, dt: u + 1. / 2. * dt * k2,
lambda u, k3, dt: u + dt * k3
)
def solve_step(self, ts, nls, vec_x0, status=None,
prestep_fun=None, poststep_fun=None):
if ts is None:
raise ValueError("Provide TimeStepper to explicit Runge-Kutta solver")
fun = nls.fun
fun_grad = nls.fun_grad
lin_solver = nls.lin_solver
ls_eps_a, ls_eps_r = lin_solver.get_tolerance()
eps_a = get_default(ls_eps_a, 1.0)
eps_r = | get_default(ls_eps_r, 1.0) | sfepy.base.base.get_default |
import numpy as nm
from sfepy.terms.terms import Term, terms
# Error message raised when a required family data item is absent.
_msg_missing_data = 'missing family data!'
class HyperElasticBase(Term):
"""
Base class for all hyperelastic terms in TL/UL formulation.
`HyperElasticBase.__call__()` computes element contributions given either
stress (-> rezidual) or tangent modulus (-> tangent sitffnes matrix),
i.e. constitutive relation type (CRT) related data. The CRT data are
computed in subclasses implementing particular CRT (e.g. neo-Hookean
material), in self.compute_crt_data().
Modes:
- 0: total formulation
- 1: updated formulation
Notes
-----
This is not a proper Term!
"""
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D'}
@staticmethod
def integrate(out, val_qp, vg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
else:
status = vg.integrate(out, val_qp, fmode)
return status
    @staticmethod
    def function(out, fun, *args):
        """Apply `fun` to `out` with the remaining arguments."""
        return fun(out, *args)
def __init__(self, *args, **kwargs):
| Term.__init__(self, *args, **kwargs) | sfepy.terms.terms.Term.__init__ |
import numpy as nm
from sfepy.terms.terms import Term, terms
# Error message raised when a required family data item is absent.
_msg_missing_data = 'missing family data!'
class HyperElasticBase(Term):
    """
    Base class for all hyperelastic terms in TL/UL formulation.

    `HyperElasticBase.__call__()` computes element contributions given either
    stress (-> rezidual) or tangent modulus (-> tangent sitffnes matrix),
    i.e. constitutive relation type (CRT) related data. The CRT data are
    computed in subclasses implementing particular CRT (e.g. neo-Hookean
    material), in self.compute_crt_data().

    Modes:

    - 0: total formulation
    - 1: updated formulation

    Notes
    -----
    This is not a proper Term!
    """
    arg_types = ('material', 'virtual', 'state')
    arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
                  'state' : 'D'}

    @staticmethod
    def integrate(out, val_qp, vg, fmode):
        """
        Integrate `val_qp` into `out`. `fmode` == 2 ('qp' mode) copies the
        quadrature point values as they are, otherwise the reference mapping
        `vg` performs the integration.
        """
        if fmode == 2:
            out[:] = val_qp
            status = 0
        else:
            status = vg.integrate(out, val_qp, fmode)
        return status

    @staticmethod
    def function(out, fun, *args):
        # Delegate the actual computation to the given function.
        return fun(out, *args)

    def __init__(self, *args, **kwargs):
        Term.__init__(self, *args, **kwargs)
        # Cache of stresses computed in the residual mode, reused when
        # assembling the tangent matrix in the same evaluation.
        self.stress_cache = None

    def get_family_data(self, state, cache_name, data_names):
        """
        Return the family data of `state`, computing them only when not
        found in the evaluate cache of the state variable.

        Notes
        -----
        `data_names` argument is ignored for now.
        """
        name = state.name
        step_cache = state.evaluate_cache.setdefault(cache_name, {})
        cache = step_cache.setdefault(self.arg_steps[name], {})
        vg, _, key = self.get_mapping(state, return_key=True)
        data_key = key + (self.arg_derivatives[name],)
        if data_key in cache:
            out = cache[data_key]
        else:
            out = self.compute_family_data(state)
            cache[data_key] = out
        return out

    def compute_stress(self, mat, family_data, **kwargs):
        """
        Evaluate the CRT stress function of the subclass in quadrature
        points.
        """
        out = nm.empty_like(family_data.green_strain)
        get = family_data.get
        fargs = [get(name, msg_if_none=_msg_missing_data)
                 for name in self.family_data_names]
        self.stress_function(out, mat, *fargs, **kwargs)
        return out

    def compute_tan_mod(self, mat, family_data, **kwargs):
        """
        Evaluate the CRT tangent modulus function of the subclass in
        quadrature points.
        """
        shape = list(family_data.green_strain.shape)
        # The tangent modulus is square in the last two axes.
        shape[-1] = shape[-2]
        out = nm.empty(shape, dtype=nm.float64)
        get = family_data.get
        fargs = [get(name, msg_if_none=_msg_missing_data)
                 for name in self.family_data_names]
        self.tan_mod_function(out, mat, *fargs, **kwargs)
        return out

    def get_fargs(self, mat, virtual, state,
                  mode=None, term_mode=None, diff_var=None, **kwargs):
        """
        Prepare the arguments of the term function for the given evaluation
        mode ('weak', 'el_avg' or 'qp').
        """
        vg, _ = self.get_mapping(state)
        fd = self.get_family_data(state, self.fd_cache_name,
                                  self.family_data_names)
        if mode == 'weak':
            if diff_var is None:
                stress = self.compute_stress(mat, fd, **kwargs)
                # Cache the stress for the subsequent matrix-mode call.
                self.stress_cache = stress
                tan_mod = nm.array([0], ndmin=4, dtype=nm.float64)
                fmode = 0
            else:
                stress = self.stress_cache
                if stress is None:
                    stress = self.compute_stress(mat, fd, **kwargs)
                tan_mod = self.compute_tan_mod(mat, fd, **kwargs)
                fmode = 1
            return (self.weak_function,
                    stress, tan_mod, fd.mtx_f, fd.det_f, vg, fmode,
                    self.hyperelastic_mode)
        elif mode in ('el_avg', 'qp'):
            if term_mode == 'strain':
                out_qp = fd.green_strain
            elif term_mode == 'stress':
                out_qp = self.compute_stress(mat, fd, **kwargs)
            else:
                raise ValueError('unsupported term mode in %s! (%s)'
                                 % (self.name, term_mode))
            fmode = {'el_avg' : 1, 'qp' : 2}[mode]
            return self.integrate, out_qp, vg, fmode
        else:
            raise ValueError('unsupported evaluation mode in %s! (%s)'
                             % (self.name, mode))

    def get_eval_shape(self, mat, virtual, state,
                       mode=None, term_mode=None, diff_var=None, **kwargs):
        """
        Return the shape and dtype of the evaluation result.
        """
        n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
        # Use integer division - the number of symmetric tensor components
        # must be an int to form a valid array shape under Python 3.
        sym = dim * (dim + 1) // 2
        if mode != 'qp':
            n_qp = 1

        return (n_el, n_qp, sym, 1), state.dtype
class DeformationGradientTerm(Term):
r"""
Deformation gradient :math:`\ull{F}` in quadrature points for
`term_mode='def_grad'` (default) or the jacobian :math:`J` if
`term_mode='jacobian'`.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\ull{F} = \pdiff{\ul{x}}{\ul{X}}|_{qp}
= \ull{I} + \pdiff{\ul{u}}{\ul{X}}|_{qp} \;, \\
\ul{x} = \ul{X} + \ul{u} \;, J = \det{(\ull{F})}
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'ev_def_grad'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, vec, vg, econn, term_mode, fmode):
d = 1 if term_mode == 'jacobian' else vg.dim
out_qp = nm.empty((out.shape[0], vg.n_qp, d, d), dtype=out.dtype)
mode = 1 if term_mode == 'jacobian' else 0
| terms.dq_def_grad(out_qp, vec, vg, econn, mode) | sfepy.terms.terms.terms.dq_def_grad |
#!/usr/bin/env python
"""
Plot logs of variables saved in a text file by sfepy.base.log.Log class.
The plot should be almost the same as the plot that would be generated by the
Log directly.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser, Action, RawDescriptionHelpFormatter
import matplotlib.pyplot as plt
from sfepy.base.log import read_log, plot_log
class ParseRc(Action):
    """
    Parse the ``--rc`` option value (``'key' : value, ...``) into a dict and
    store it on the namespace.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        import ast
        # Use ast.literal_eval() instead of eval(): the value comes from the
        # command line, so arbitrary code must not be executed. Matplotlib rc
        # parameters are plain literals, which literal_eval fully supports.
        pars = ast.literal_eval('{' + values + '}')
        setattr(namespace, self.dest, pars)
# Help strings for the command line options, keyed by option dest name.
helps = {
    'groups' :
    'list of log data groups subplots (from 0) to plot - all groups are'
    ' plotted if not given',
    'output_filename' :
    'save the figure using the given file name',
    'rc' : 'matplotlib resources',
    'no_legends' :
    'do not show legends in the log plots',
    'nbins' :
    'the numbers of bins in x, y axes for all groups [default: %(default)s]',
    'swap_axes' :
    'swap the axes of the plots',
    'no_show' :
    'do not show the figure',
}
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-g', '--groups', metavar='int[,int,...]',
action='store', dest='groups',
default=None, help=helps['groups'])
parser.add_argument('-o', '--output', metavar='filename',
action='store', dest='output_filename',
default=None, help=helps['output_filename'])
parser.add_argument('--rc', type=str, metavar='key:val,...',
action=ParseRc, dest='rc',
default={}, help=helps['rc'])
parser.add_argument('--no-legends',
action='store_false', dest='show_legends',
default=True, help=helps['no_legends'])
parser.add_argument('--nbins', metavar='nx1,ny1,...',
action='store', dest='nbins',
default=None, help=helps['nbins'])
parser.add_argument('--swap-axes',
action='store_true', dest='swap_axes',
default=False, help=helps['swap_axes'])
parser.add_argument('-n', '--no-show',
action='store_true', dest='no_show',
default=False, help=helps['no_show'])
parser.add_argument('filename')
options = parser.parse_args()
filename = options.filename
if options.groups is not None:
options.groups = [int(ii) for ii in options.groups.split(',')]
if options.nbins is not None:
aux = [int(ii) if ii != 'None' else None
for ii in options.nbins.split(',')]
xnbins, ynbins = aux[::2], aux[1::2]
else:
xnbins = ynbins = None
log, info = | read_log(filename) | sfepy.base.log.read_log |
"""
Acoustic band gaps in a strongly heterogeneous elastic body, detected using
homogenization techniques.
A reference periodic cell contains two domains: the stiff matrix :math:`Y_m`
and the soft (but heavy) inclusion :math:`Y_c`.
"""
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.base.ioutils import InDir
from sfepy.homogenization.coefficients import Coefficients
from band_gaps_conf import BandGapsConf, get_pars, clip_sqrt, normalize
clip_sqrt, normalize # Make pyflakes happy...
incwd = | InDir(__file__) | sfepy.base.ioutils.InDir |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = | get_default(conf, self.conf) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = | get_default(mtx, self.mtx) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = | get_default(status, self.status) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
| assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0]) | sfepy.base.base.assert_ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = make_get_conf(conf, kwargs)
common = | LinearSolver.process_conf(conf) | sfepy.solvers.solvers.LinearSolver.process_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers take no iteration count or tolerance options.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
def __init__(self, conf, **kwargs):
| LinearSolver.__init__(self, conf, **kwargs) | sfepy.solvers.solvers.LinearSolver.__init__ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        # Direct solvers take no iteration count or tolerance options.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module has moved several times across
        # SciPy versions - try all known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # A genuine UMFPACK wrapper module defines UMFPACK_OK.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize the matrix so repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (set e.g. by time-stepping solvers)
        # overrides the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_110 = {
'name' : 'ls110',
'kind' : 'ls.scipy_iterative',
'method' : 'cg',
'precond' : None,
'callback' : None,
'i_max' : 1000,
'eps_r' : 1e-12,
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        # Direct solvers take no iteration count or tolerance options.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module has moved several times across
        # SciPy versions - try all known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # A genuine UMFPACK wrapper module defines UMFPACK_OK.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize the matrix so repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (set e.g. by time-stepping solvers)
        # overrides the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_110 = {
'name' : 'ls110',
'kind' : 'ls.scipy_iterative',
'method' : 'cg',
'precond' : None,
'callback' : None,
'i_max' : 1000,
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = | LinearSolver.process_conf(conf) | sfepy.solvers.solvers.LinearSolver.process_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        # Direct solvers take no iteration count or tolerance options.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module has moved several times across
        # SciPy versions - try all known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # A genuine UMFPACK wrapper module defines UMFPACK_OK.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize the matrix so repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (set e.g. by time-stepping solvers)
        # overrides the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Only the relative tolerance eps_r is configurable - the SciPy
        # iterative solver interface has no separate absolute tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
def __init__(self, conf, **kwargs):
import scipy.sparse.linalg.isolve as la
| LinearSolver.__init__(self, conf, **kwargs) | sfepy.solvers.solvers.LinearSolver.__init__ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in ``conf``, ``mtx`` and ``status`` defaults from the solver
    instance, checks shape compatibility of the matrix and vectors, and
    stores the elapsed wall-clock time in ``status['time']``.
    """
    from functools import wraps

    try:
        clock = time.perf_counter
    except AttributeError:
        # Python 2 fallback; time.clock() was removed in Python 3.8.
        clock = time.clock

    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and match the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        # Direct solvers take no iteration count or tolerance options.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module has moved several times across
        # SciPy versions - try all known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # A genuine UMFPACK wrapper module defines UMFPACK_OK.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize the matrix so repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (set e.g. by time-stepping solvers)
        # overrides the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Only the relative tolerance eps_r is configurable - the SciPy
        # iterative solver interface has no separate absolute tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
def __init__(self, conf, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, **kwargs)
try:
solver = getattr( la, self.conf.method )
except AttributeError:
output( 'scipy solver %s does not exist!' % self.conf.method )
output( 'using cg instead' )
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = | get_default(eps_r, self.conf.eps_r) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
**kwargs)
ttt = time.clock() - tt
if status is not None:
status['time'] = ttt
return result
return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy, using SuperLU or UMFPACK (when
    installed) according to the ``method`` configuration option.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration count or tolerance parameters.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved between SciPy versions - probe
        # the historical import locations in turn.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; again try several historical locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A genuine UMFPACK module exposes the UMFPACK_OK constant.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)
        # Both 'auto' and 'umfpack' use UMFPACK whenever it is available.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Precompute the LU factorization of the matrix.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute (possibly set via kwargs) overrides the
        # configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a is not supported - SciPy's tol argument acts as both the
        # absolute and the relative tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Import here so SciPy is only required when this solver is used.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            # Look the iterative solver function up by its configured name.
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Maps sign(info), as returned by the SciPy solvers, to a message.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = | get_default(i_max, self.conf.i_max) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Arguments not passed explicitly fall back to the solver instance's
    own configuration, matrix and status dict; the elapsed time is stored
    in ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() was removed in Python 3.8 - this file
        # appears to target Python 2 era interpreters; confirm.
        tt = time.clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and compatible with the rhs.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy, using SuperLU or UMFPACK (when
    installed) according to the ``method`` configuration option.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration count or tolerance parameters.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved between SciPy versions - probe
        # the historical import locations in turn.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; again try several historical locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A genuine UMFPACK module exposes the UMFPACK_OK constant.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # Both 'auto' and 'umfpack' use UMFPACK whenever it is available.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Precompute the LU factorization of the matrix.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute (possibly set via kwargs) overrides the
        # configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a is not supported - SciPy's tol argument acts as both the
        # absolute and the relative tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Import here so SciPy is only required when this solver is used.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Maps sign(info), as returned by the SciPy solvers, to a message.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr expects separate left/right preconditioner arguments.
        # NOTE(review): this tests conf.method (possibly a per-call
        # override) while the solver itself was chosen from
        # self.conf.method in __init__() - confirm they always agree.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
"""
Interface to PyAMG solvers.
Notes
-----
Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
"""
name = 'ls.pyamg'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_102 = {
'name' : 'ls102',
'kind' : 'ls.pyamg',
'method' : 'smoothed_aggregation_solver',
'accel' : 'cg'
'eps_r' : 1e-12,
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Arguments not passed explicitly fall back to the solver instance's
    own configuration, matrix and status dict; the elapsed time is stored
    in ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() was removed in Python 3.8 - this file
        # appears to target Python 2 era interpreters; confirm.
        tt = time.clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and compatible with the rhs.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy, using SuperLU or UMFPACK (when
    installed) according to the ``method`` configuration option.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration count or tolerance parameters.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved between SciPy versions - probe
        # the historical import locations in turn.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; again try several historical locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A genuine UMFPACK module exposes the UMFPACK_OK constant.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # Both 'auto' and 'umfpack' use UMFPACK whenever it is available.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Precompute the LU factorization of the matrix.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute (possibly set via kwargs) overrides the
        # configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        # eps_a is not supported - SciPy's tol acts as both the absolute
        # and the relative tolerance.
        options = Struct(method=get('method', 'cg'),
                         precond=get('precond', None),
                         callback=get('callback', None),
                         i_max=get('i_max', 100),
                         eps_a=None,
                         eps_r=get('eps_r', 1e-8))
        return options + LinearSolver.process_conf(conf)
    def __init__(self, conf, **kwargs):
        # Import here so SciPy is only required when this solver is used.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        method = self.conf.method
        if hasattr(la, method):
            self.solver = getattr(la, method)
        else:
            output('scipy solver %s does not exist!' % method)
            output('using cg instead')
            self.solver = la.cg
        # Keys are sign(info) as returned by the SciPy solvers.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Resolve per-call overrides against the stored configuration.
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr expects separate left/right preconditioners.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        reason = self.converged_reasons[nm.sign(info)]
        output('%s convergence: %s (%s)' % (self.conf.method, info, reason))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
"""
Interface to PyAMG solvers.
Notes
-----
Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
"""
name = 'ls.pyamg'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_102 = {
'name' : 'ls102',
'kind' : 'ls.pyamg',
'method' : 'smoothed_aggregation_solver',
'accel' : 'cg'
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = | LinearSolver.process_conf(conf) | sfepy.solvers.solvers.LinearSolver.process_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Arguments not passed explicitly fall back to the solver instance's
    own configuration, matrix and status dict; the elapsed time is stored
    in ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() was removed in Python 3.8 - this file
        # appears to target Python 2 era interpreters; confirm.
        tt = time.clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and compatible with the rhs.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy, using SuperLU or UMFPACK (when
    installed) according to the ``method`` configuration option.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration count or tolerance parameters.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved between SciPy versions - probe
        # the historical import locations in turn.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; again try several historical locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A genuine UMFPACK module exposes the UMFPACK_OK constant.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # Both 'auto' and 'umfpack' use UMFPACK whenever it is available.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Precompute the LU factorization of the matrix.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute (possibly set via kwargs) overrides the
        # configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """Deprecated alias kept for compatibility with old input files; use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to the parent class.
        conf.method = 'umfpack'
        super(Umfpack, self).__init__(conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a is not supported - SciPy's tol argument acts as both the
        # absolute and the relative tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Import here so SciPy is only required when this solver is used.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Maps sign(info), as returned by the SciPy solvers, to a message.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr expects separate left/right preconditioner arguments.
        # NOTE(review): this tests conf.method (possibly a per-call
        # override) while the solver itself was chosen from
        # self.conf.method in __init__() - confirm they always agree.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
"""
Interface to PyAMG solvers.
Notes
-----
Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
"""
name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # i_max and eps_a are not used by the pyamg interface; only the
        # relative tolerance eps_r applies.
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
##
# c: 02.05.2008, r: 02.05.2008
def __init__( self, conf, **kwargs ):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError( msg )
| LinearSolver.__init__(self, conf, mg=None, **kwargs) | sfepy.solvers.solvers.LinearSolver.__init__ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Arguments not passed explicitly fall back to the solver instance's
    own configuration, matrix and status dict; the elapsed time is stored
    in ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() was removed in Python 3.8 - this file
        # appears to target Python 2 era interpreters; confirm.
        tt = time.clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and compatible with the rhs.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy, using SuperLU or UMFPACK (when
    installed) according to the ``method`` configuration option.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration count or tolerance parameters.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved between SciPy versions - probe
        # the historical import locations in turn.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; again try several historical locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A genuine UMFPACK module exposes the UMFPACK_OK constant.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # Both 'auto' and 'umfpack' use UMFPACK whenever it is available.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Precompute the LU factorization of the matrix.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute (possibly set via kwargs) overrides the
        # configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a is not supported - SciPy's tol argument acts as both the
        # absolute and the relative tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Import here so SciPy is only required when this solver is used.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Maps sign(info), as returned by the SciPy solvers, to a message.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr expects separate left/right preconditioner arguments.
        # NOTE(review): this tests conf.method (possibly a per-call
        # override) while the solver itself was chosen from
        # self.conf.method in __init__() - confirm they always agree.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
"""
Interface to PyAMG solvers.
Notes
-----
Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
"""
name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # i_max and eps_a are not used by the pyamg interface; only the
        # relative tolerance eps_r applies.
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
##
# c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        # mg (the multigrid hierarchy) starts as None and is built below
        # when the matrix is already known.
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            # Look the pyamg solver factory up by its configured name.
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                # Precompute the multigrid hierarchy for the given matrix.
                self.mg = self.solver( self.mtx )
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = | get_default(eps_r, self.conf.eps_r) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Arguments not passed explicitly fall back to the solver instance's
    own configuration, matrix and status dict; the elapsed time is stored
    in ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() was removed in Python 3.8 - this file
        # appears to target Python 2 era interpreters; confirm.
        tt = time.clock()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system matrix must be square and compatible with the rhs.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy, using SuperLU or UMFPACK (when
    installed) according to the ``method`` configuration option.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration count or tolerance parameters.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved between SciPy versions - probe
        # the historical import locations in turn.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; again try several historical locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A genuine UMFPACK module exposes the UMFPACK_OK constant.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # Both 'auto' and 'umfpack' use UMFPACK whenever it is available.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Precompute the LU factorization of the matrix.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute (possibly set via kwargs) overrides the
        # configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a is not supported - SciPy's tol argument acts as both the
        # absolute and the relative tolerance.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Import here so SciPy is only required when this solver is used.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Maps sign(info), as returned by the SciPy solvers, to a message.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr expects separate left/right preconditioner arguments.
        # NOTE(review): this tests conf.method (possibly a per-call
        # override) while the solver itself was chosen from
        # self.conf.method in __init__() - confirm they always agree.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            # Fall back to the default AMG construction for unknown names.
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        # Build the multigrid hierarchy right away if a matrix is given.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx
        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
        return sol
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_120 = {
'name' : 'ls120',
'kind' : 'ls.petsc',
'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'precond_side' : 'left', # ksp_pc_side
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Missing arguments (`conf`, `mtx`, `status`) are filled in from the
    solver instance, the matrix and right-hand side shapes are verified
    and the wall-clock time of the call is stored in ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # time.time() is wall-clock on all platforms, unlike time.clock(),
        # which measures CPU time on POSIX, wall time on Windows, and was
        # removed in Python 3.8.
        tt = time.time()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system must be square and compatible with the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.time() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved several times in SciPy's history -
        # try all known module locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional - it may ship with SciPy or as scikits.umfpack.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` directly; tolerance/iteration arguments are
        ignored - they only keep the common solver interface.
        """
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance-level `presolve` attribute overrides the configuration.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """
    This class is kept for compatibility with old input files. Use
    ScipyDirect instead.
    """
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end of ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Imported here so that scipy.sparse.linalg is only required when
        # this solver is actually instantiated.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            # Fall back to conjugate gradients for unknown method names.
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Human-readable reasons keyed by the sign of the scipy info value.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` iteratively; `precond` and `callback` can be
        overridden via keyword arguments.
        """
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr accepts separate left and right preconditioners.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            # Fall back to the default AMG construction for unknown names.
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        # Build the multigrid hierarchy right away if a matrix is given.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx
        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
        return sol
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_120 = {
'name' : 'ls120',
'kind' : 'ls.petsc',
'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'precond_side' : 'left', # ksp_pc_side
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = | LinearSolver.process_conf(conf) | sfepy.solvers.solvers.LinearSolver.process_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Missing arguments (`conf`, `mtx`, `status`) are filled in from the
    solver instance, the matrix and right-hand side shapes are verified
    and the wall-clock time of the call is stored in ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # time.time() is wall-clock on all platforms, unlike time.clock(),
        # which measures CPU time on POSIX, wall time on Windows, and was
        # removed in Python 3.8.
        tt = time.time()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system must be square and compatible with the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.time() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved several times in SciPy's history -
        # try all known module locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional - it may ship with SciPy or as scikits.umfpack.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` directly; tolerance/iteration arguments are
        ignored - they only keep the common solver interface.
        """
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance-level `presolve` attribute overrides the configuration.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """
    This class is kept for compatibility with old input files. Use
    ScipyDirect instead.
    """
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end of ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Imported here so that scipy.sparse.linalg is only required when
        # this solver is actually instantiated.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            # Fall back to conjugate gradients for unknown method names.
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Human-readable reasons keyed by the sign of the scipy info value.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` iteratively; `precond` and `callback` can be
        overridden via keyword arguments.
        """
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr accepts separate left and right preconditioners.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            # Fall back to the default AMG construction for unknown names.
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        # Build the multigrid hierarchy right away if a matrix is given.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx
        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
        return sol
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Merge PETSc-specific options with the common solver options.
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common
def __init__( self, conf, **kwargs ):
try:
import petsc4py
petsc4py.init([])
from petsc4py import PETSc
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError( msg )
| LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs) | sfepy.solvers.solvers.LinearSolver.__init__ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Missing arguments (`conf`, `mtx`, `status`) are filled in from the
    solver instance, the matrix and right-hand side shapes are verified
    and the wall-clock time of the call is stored in ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # time.time() is wall-clock on all platforms, unlike time.clock(),
        # which measures CPU time on POSIX, wall time on Windows, and was
        # removed in Python 3.8.
        tt = time.time()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system must be square and compatible with the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.time() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved several times in SciPy's history -
        # try all known module locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional - it may ship with SciPy or as scikits.umfpack.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` directly; tolerance/iteration arguments are
        ignored - they only keep the common solver interface.
        """
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance-level `presolve` attribute overrides the configuration.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """
    This class is kept for compatibility with old input files. Use
    ScipyDirect instead.
    """
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end of ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Imported here so that scipy.sparse.linalg is only required when
        # this solver is actually instantiated.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            # Fall back to conjugate gradients for unknown method names.
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Human-readable reasons keyed by the sign of the scipy info value.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` iteratively; `precond` and `callback` can be
        overridden via keyword arguments.
        """
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr accepts separate left and right preconditioners.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            # Fall back to the default AMG construction for unknown names.
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        # Build the multigrid hierarchy right away if a matrix is given.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx
        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
        return sol
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Merge PETSc-specific options with the common solver options.
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common
def __init__( self, conf, **kwargs ):
try:
import petsc4py
petsc4py.init([])
from petsc4py import PETSc
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
ksp = PETSc.KSP().create()
ksp.setType( self.conf.method )
ksp.getPC().setType( self.conf.precond )
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
self.ksp = ksp
self.converged_reasons = {}
for key, val in ksp.ConvergedReason.__dict__.iteritems():
if isinstance(val, int):
self.converged_reasons[val] = key
    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR format and wrap it as a PETSc AIJ matrix.

        Returns the PETSc matrix together with solution and right-hand
        side vectors of compatible sizes.
        """
        mtx = sps.csr_matrix(mtx)
        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_a = | get_default(eps_a, self.conf.eps_a) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Missing arguments (`conf`, `mtx`, `status`) are filled in from the
    solver instance, the matrix and right-hand side shapes are verified
    and the wall-clock time of the call is stored in ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # time.time() is wall-clock on all platforms, unlike time.clock(),
        # which measures CPU time on POSIX, wall time on Windows, and was
        # removed in Python 3.8.
        tt = time.time()
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The system must be square and compatible with the right-hand side.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.time() - tt
        if status is not None:
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved several times in SciPy's history -
        # try all known module locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional - it may ship with SciPy or as scikits.umfpack.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` directly; tolerance/iteration arguments are
        ignored - they only keep the common solver interface.
        """
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance-level `presolve` attribute overrides the configuration.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """
    This class is kept for compatibility with old input files. Use
    ScipyDirect instead.
    """
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end of ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        # Imported here so that scipy.sparse.linalg is only required when
        # this solver is actually instantiated.
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            # Fall back to conjugate gradients for unknown method names.
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Human-readable reasons keyed by the sign of the scipy info value.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve `mtx * x = rhs` iteratively; `precond` and `callback` can be
        overridden via keyword arguments.
        """
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr accepts separate left and right preconditioners.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError(msg)

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Resolve the hierarchy constructor by name; fall back to smoothed
        # aggregation for unknown names.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Reuse the multigrid hierarchy only when the matrix is unchanged.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_120 = {
'name' : 'ls120',
'kind' : 'ls.petsc',
'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'precond_side' : 'left', # ksp_pc_side
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', 'icc'),
precond_side=get('precond_side', None),
i_max=get('i_max', 100),
eps_a=get('eps_a', 1e-8),
eps_r=get('eps_r', 1e-8),
eps_d=get('eps_d', 1e5)) + common
def __init__( self, conf, **kwargs ):
try:
import petsc4py
petsc4py.init([])
from petsc4py import PETSc
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
ksp = PETSc.KSP().create()
ksp.setType( self.conf.method )
ksp.getPC().setType( self.conf.precond )
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
self.ksp = ksp
self.converged_reasons = {}
for key, val in ksp.ConvergedReason.__dict__.iteritems():
if isinstance(val, int):
self.converged_reasons[val] = key
def set_matrix( self, mtx ):
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat().createAIJ( mtx.shape,
csr = (mtx.indptr,
mtx.indices,
mtx.data) )
sol, rhs = pmtx.getVecs()
return pmtx, sol, rhs
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = | get_default(eps_r, self.conf.eps_r) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing arguments from the solver instance (`conf`, `mtx`,
    `status`), checks the system dimensions, and stores the elapsed time
    into ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        start = time.clock()

        # Fall back to values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - start
        if status is not None:
            status['time'] = elapsed

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy.

    Depending on the `method` configuration option, UMFPACK (when
    importable) or SuperLU is used.
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solvers moved around between SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # Check that the imported module is the real UMFPACK wrapper.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute `presolve` takes precedence over the
        # configuration option of the same name.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Resolve the solver function by name; fall back to CG when the
        # requested method is not provided by SciPy.
        solver = getattr(la, self.conf.method, None)
        if solver is None:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            solver = la.cg
        self.solver = solver

        # Meaning of the sign of the solver `info` return code.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR expects two preconditioner arguments, the other solvers one.
        if conf.method == 'qmr':
            precond_args = dict(M1=precond, M2=precond)
        else:
            precond_args = dict(M=precond)

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **precond_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError(msg)

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Resolve the hierarchy constructor by name; fall back to smoothed
        # aggregation for unknown names.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Reuse the multigrid hierarchy only when the matrix is unchanged.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_120 = {
'name' : 'ls120',
'kind' : 'ls.petsc',
'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'precond_side' : 'left', # ksp_pc_side
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', 'icc'),
precond_side=get('precond_side', None),
i_max=get('i_max', 100),
eps_a=get('eps_a', 1e-8),
eps_r=get('eps_r', 1e-8),
eps_d=get('eps_d', 1e5)) + common
def __init__( self, conf, **kwargs ):
try:
import petsc4py
petsc4py.init([])
from petsc4py import PETSc
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
ksp = PETSc.KSP().create()
ksp.setType( self.conf.method )
ksp.getPC().setType( self.conf.precond )
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
self.ksp = ksp
self.converged_reasons = {}
for key, val in ksp.ConvergedReason.__dict__.iteritems():
if isinstance(val, int):
self.converged_reasons[val] = key
def set_matrix( self, mtx ):
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat().createAIJ( mtx.shape,
csr = (mtx.indptr,
mtx.indices,
mtx.data) )
sol, rhs = pmtx.getVecs()
return pmtx, sol, rhs
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = | get_default(i_max, self.conf.i_max) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing arguments from the solver instance (`conf`, `mtx`,
    `status`), checks the system dimensions, and stores the elapsed time
    into ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        start = time.clock()

        # Fall back to values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - start
        if status is not None:
            status['time'] = elapsed

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy.

    Depending on the `method` configuration option, UMFPACK (when
    importable) or SuperLU is used.
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solvers moved around between SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # Check that the imported module is the real UMFPACK wrapper.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute `presolve` takes precedence over the
        # configuration option of the same name.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Resolve the solver function by name; fall back to CG when the
        # requested method is not provided by SciPy.
        solver = getattr(la, self.conf.method, None)
        if solver is None:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            solver = la.cg
        self.solver = solver

        # Meaning of the sign of the solver `info` return code.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR expects two preconditioner arguments, the other solvers one.
        if conf.method == 'qmr':
            precond_args = dict(M1=precond, M2=precond)
        else:
            precond_args = dict(M=precond)

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **precond_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError(msg)

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Resolve the hierarchy constructor by name; fall back to smoothed
        # aggregation for unknown names.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Reuse the multigrid hierarchy only when the matrix is unchanged.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver(LinearSolver):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__(self, conf, **kwargs):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError(msg)

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # Solver and preconditioner types are fixed at creation time.
        ksp = PETSc.KSP().create()
        ksp.setType(self.conf.method)
        ksp.getPC().setType(self.conf.precond)
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map PETSc integer reason codes to their symbolic names.
        self.converged_reasons = dict(
            (val, key)
            for key, val in ksp.ConvergedReason.__dict__.iteritems()
            if isinstance(val, int))

    def set_matrix(self, mtx):
        """
        Convert a SciPy matrix to a PETSc AIJ matrix and create the
        corresponding work vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ(mtx.shape,
                                          csr=(mtx.indptr,
                                               mtx.indices,
                                               mtx.data))
        sol, rhs = pmtx.getVecs()

        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1 = {
'name' : 'ls',
'kind' : 'ls.petsc_parallel',
'log_dir' : '.', # Store logs here.
'n_proc' : 5, # Number of processes to run.
'method' : 'cg', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'icc', # sub_pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing arguments from the solver instance (`conf`, `mtx`,
    `status`), checks the system dimensions, and stores the elapsed time
    into ``status['time']`` when a status dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        start = time.clock()

        # Fall back to values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - start
        if status is not None:
            status['time'] = elapsed

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy.

    Depending on the `method` configuration option, UMFPACK (when
    importable) or SuperLU is used.
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solvers moved around between SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            # Check that the imported module is the real UMFPACK wrapper.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize so that repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute `presolve` takes precedence over the
        # configuration option of the same name.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Resolve the solver function by name; fall back to CG when the
        # requested method is not provided by SciPy.
        solver = getattr(la, self.conf.method, None)
        if solver is None:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            solver = la.cg
        self.solver = solver

        # Meaning of the sign of the solver `info` return code.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR expects two preconditioner arguments, the other solvers one.
        if conf.method == 'qmr':
            precond_args = dict(M1=precond, M2=precond)
        else:
            precond_args = dict(M=precond)

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **precond_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError(msg)

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Resolve the hierarchy constructor by name; fall back to smoothed
        # aggregation for unknown names.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Reuse the multigrid hierarchy only when the matrix is unchanged.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Maps the 'precond_side' configuration value to the constant passed
    # to ksp.setPCSide().
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        """
        Create and configure the PETSc KSP object; raise ImportError when
        petsc4py is not available.
        """
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        ksp = PETSc.KSP().create()

        # Solver and preconditioner types are fixed at construction time.
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Invert the ConvergedReason enum so that the integer reason set
        # after a solve can be reported by name.
        # NOTE(review): dict.iteritems() is Python 2 only.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it in a PETSc AIJ matrix.

        Returns the PETSc matrix together with compatible solution and
        right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()

        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1 = {
'name' : 'ls',
'kind' : 'ls.petsc_parallel',
'log_dir' : '.', # Store logs here.
'n_proc' : 5, # Number of processes to run.
'method' : 'cg', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'icc', # sub_pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = PETScKrylovSolver.process_conf(conf, kwargs)
return Struct(log_dir=get('log_dir', '.'),
n_proc=get('n_proc', 1),
sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = | get_default(eps_a, self.conf.eps_a) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing `conf`, `mtx` and `status` arguments from the solver
    instance, checks the system shapes and records the elapsed time in
    ``status['time']`` when a status dict is given.
    """
    def _wrapped(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        t_start = time.clock()

        # Fall back to the instance-level defaults.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the rhs...
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        # ...and with the initial guess, if any.
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - t_start
        if status is not None:
            status['time'] = elapsed

        return result

    return _wrapped
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (superlu or umfpack), with optional
    prefactorization of the system matrix.
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)

        um = self.sls = None

        # The direct solver module moved several times across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        # Only trust umfpack when the binding exposes its status constants.
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize so that repeated solves with the same
                # matrix are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve the system; `x0` and the tolerances are ignored by the
        direct solver.
        """
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (e.g. passed via kwargs) overrides the
        # configuration value.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the umfpack method and delegate everything to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        common = LinearSolver.process_conf(conf)
        get = make_get_conf(conf, kwargs)

        specific = Struct(method=get('method', 'cg'),
                          precond=get('precond', None),
                          callback=get('callback', None),
                          i_max=get('i_max', 100),
                          eps_a=None,
                          eps_r=get('eps_r', 1e-8))
        return specific + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Resolve the solver function by name; unknown names fall back to cg.
        try:
            fun = getattr(la, self.conf.method)

        except AttributeError:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            fun = la.cg

        self.solver = fun

        # Meaning of the `info` value returned by the SciPy solvers,
        # keyed by its sign.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # qmr takes two preconditioner arguments, the other solvers just one.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        """
        Resolve the PyAMG solver constructor named by `conf.method`; raise
        ImportError when pyamg is not installed.
        """
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Unknown method names fall back to smoothed_aggregation_solver.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix was passed in.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Maps the 'precond_side' configuration value to the constant passed
    # to ksp.setPCSide().
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        """
        Create and configure the PETSc KSP object; raise ImportError when
        petsc4py is not available.
        """
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        ksp = PETSc.KSP().create()

        # Solver and preconditioner types are fixed at construction time.
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Invert the ConvergedReason enum so that the integer reason set
        # after a solve can be reported by name.
        # NOTE(review): dict.iteritems() is Python 2 only.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it in a PETSc AIJ matrix.

        Returns the PETSc matrix together with compatible solution and
        right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()

        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1 = {
'name' : 'ls',
'kind' : 'ls.petsc_parallel',
'log_dir' : '.', # Store logs here.
'n_proc' : 5, # Number of processes to run.
'method' : 'cg', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'icc', # sub_pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = PETScKrylovSolver.process_conf(conf, kwargs)
return Struct(log_dir=get('log_dir', '.'),
n_proc=get('n_proc', 1),
sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = | get_default(eps_r, self.conf.eps_r) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing `conf`, `mtx` and `status` arguments from the solver
    instance, checks the system shapes and records the elapsed time in
    ``status['time']`` when a status dict is given.
    """
    def _wrapped(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        t_start = time.clock()

        # Fall back to the instance-level defaults.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the rhs...
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        # ...and with the initial guess, if any.
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - t_start
        if status is not None:
            status['time'] = elapsed

        return result

    return _wrapped
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (superlu or umfpack), with optional
    prefactorization of the system matrix.
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)

        um = self.sls = None

        # The direct solver module moved several times across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        # Only trust umfpack when the binding exposes its status constants.
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize so that repeated solves with the same
                # matrix are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve the system; `x0` and the tolerances are ignored by the
        direct solver.
        """
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (e.g. passed via kwargs) overrides the
        # configuration value.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the umfpack method and delegate everything to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        common = LinearSolver.process_conf(conf)
        get = make_get_conf(conf, kwargs)

        specific = Struct(method=get('method', 'cg'),
                          precond=get('precond', None),
                          callback=get('callback', None),
                          i_max=get('i_max', 100),
                          eps_a=None,
                          eps_r=get('eps_r', 1e-8))
        return specific + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Resolve the solver function by name; unknown names fall back to cg.
        try:
            fun = getattr(la, self.conf.method)

        except AttributeError:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            fun = la.cg

        self.solver = fun

        # Meaning of the `info` value returned by the SciPy solvers,
        # keyed by its sign.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # qmr takes two preconditioner arguments, the other solvers just one.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        """
        Resolve the PyAMG solver constructor named by `conf.method`; raise
        ImportError when pyamg is not installed.
        """
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Unknown method names fall back to smoothed_aggregation_solver.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix was passed in.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Maps the 'precond_side' configuration value to the constant passed
    # to ksp.setPCSide().
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        """
        Create and configure the PETSc KSP object; raise ImportError when
        petsc4py is not available.
        """
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        ksp = PETSc.KSP().create()

        # Solver and preconditioner types are fixed at construction time.
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Invert the ConvergedReason enum so that the integer reason set
        # after a solve can be reported by name.
        # NOTE(review): dict.iteritems() is Python 2 only.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it in a PETSc AIJ matrix.

        Returns the PETSc matrix together with compatible solution and
        right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()

        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1 = {
'name' : 'ls',
'kind' : 'ls.petsc_parallel',
'log_dir' : '.', # Store logs here.
'n_proc' : 5, # Number of processes to run.
'method' : 'cg', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'icc', # sub_pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = PETScKrylovSolver.process_conf(conf, kwargs)
return Struct(log_dir=get('log_dir', '.'),
n_proc=get('n_proc', 1),
sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = | get_default(i_max, self.conf.i_max) | sfepy.base.base.get_default |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
**kwargs)
ttt = time.clock() - tt
if status is not None:
status['time'] = ttt
return result
return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'auto'),
presolve=get('presolve', False),
warn=get('warn', True),
i_max=None, eps_a=None, eps_r=None) + common
def __init__(self, conf, **kwargs):
LinearSolver.__init__(self, conf, **kwargs)
um = self.sls = None
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
self.sls = aux['sls']
aux = try_imports(['import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
if 'um' in aux:
um = aux['um']
if um is not None:
is_umfpack = hasattr(um, 'UMFPACK_OK')
else:
is_umfpack = False
method = self.conf.method
if method == 'superlu':
self.sls.use_solver(useUmfpack=False)
elif method == 'umfpack':
if not is_umfpack and self.conf.warn:
output('umfpack not available, using superlu!')
elif method != 'auto':
raise ValueError('uknown solution method! (%s)' % method)
if method != 'superlu' and is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
self.solve = None
if self._presolve() and hasattr(self, 'mtx'):
if self.mtx is not None:
self.solve = self.sls.factorized(self.mtx)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def _presolve(self):
if hasattr(self, 'presolve'):
return self.presolve
else:
return self.conf.presolve
class Umfpack(ScipyDirect):
"""This class stays for compatability with old input files. Use ScipyDirect
isntead."""
name = 'ls.umfpack'
def __init__(self, conf, **kwargs):
conf.method = 'umfpack'
ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_110 = {
'name' : 'ls110',
'kind' : 'ls.scipy_iterative',
'method' : 'cg',
'precond' : None,
'callback' : None,
'i_max' : 1000,
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', None),
callback=get('callback', None),
i_max=get('i_max', 100),
eps_a=None,
eps_r=get('eps_r', 1e-8)) + common
def __init__(self, conf, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, **kwargs)
try:
solver = getattr( la, self.conf.method )
except AttributeError:
output( 'scipy solver %s does not exist!' % self.conf.method )
output( 'using cg instead' )
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
precond = get_default(kwargs.get('precond', None), self.conf.precond)
callback = get_default(kwargs.get('callback', None), self.conf.callback)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
callback=callback, **prec_args)
output('%s convergence: %s (%s)'
% (self.conf.method,
info, self.converged_reasons[nm.sign(info)]))
return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # i_max and eps_a are unused by PyAMG - only the relative
        # tolerance applies, see the class docstring.
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        """
        Resolve the configured PyAMG solver factory; fall back to
        smoothed_aggregation_solver for unknown names.
        """
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        # The multigrid hierarchy `mg` is built lazily; it is created here
        # only when a matrix was already passed to the constructor.
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuild the multigrid hierarchy only when the matrix object
        # changed (identity check) - building it is the expensive part.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx
        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.
    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.
    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map the user-facing preconditioning side names to the PETSc PC side
    # constants (PC_LEFT=0, PC_RIGHT=1, PC_SYMMETRIC=2).
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        """
        Create and configure the PETSc KSP object (solver type,
        preconditioner type and side).
        """
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
        ksp = PETSc.KSP().create()
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp
        # Build a reverse map: PETSc reason code (int) -> reason name, for
        # readable convergence reports in __call__.
        # FIX: use .items() instead of the Python-2-only .iteritems(), so
        # the code also runs under Python 3 (works identically on 2.x).
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.items():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it in a PETSc AIJ matrix; also create
        compatible (uninitialized) solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)
        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve ``mtx * x = rhs`` with the configured KSP; tolerances not
        given explicitly default to the values in `self.conf`.
        """
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d
        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)
        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))
        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',
                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.
                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Inherit all common Krylov options (method, precond, tolerances)
        # from the sequential PETSc solver configuration.
        common = PETScKrylovSolver.process_conf(conf, kwargs)
        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
| ensure_path(log_filename) | sfepy.base.ioutils.ensure_path |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Missing arguments are filled in from the solver instance, the system
    shapes are checked for consistency, and the time spent in the call is
    stored in ``status['time']`` when a `status` dict is given.
    """
    import functools

    # FIX: preserve the wrapped solver's name/docstring so introspection
    # and documentation tools see `__call__`, not `_standard_call`.
    @functools.wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() measures CPU time on Unix and was
        # removed in Python 3.8 - kept for behavior compatibility here.
        tt = time.clock()

        # Fall back to the values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and match the right-hand side size.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy: SuperLU, or UMFPACK when available.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration/tolerance controls - fix them
        # to None so the common Struct interface stays uniform.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        """
        Locate the scipy direct-solver module and optional UMFPACK
        bindings, select the backend, and optionally prefactorize.
        """
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved across scipy versions - try the
        # known module paths in order, oldest first.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; look for it in scipy and scikits.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A stub module may import fine - check for the real API.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # 'auto' and 'umfpack' both use UMFPACK when it is present.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize once; __call__ then only back-substitutes.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute `presolve` overrides the configuration.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK branch of ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a stays None - SciPy's iterative solvers expose a single
        # `tol` argument, which receives eps_r in __call__.
        opts = Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8))
        return opts + common

    def __init__(self, conf, **kwargs):
        """
        Look up the configured solver in scipy's isolve module, warning
        and falling back to `cg` when the name is unknown.
        """
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        solver = getattr(la, self.conf.method, None)
        if solver is None:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Indexed by the sign of the solver's `info` return value.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve ``mtx * x = rhs``; unset arguments default to `self.conf`.
        """
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # qmr takes two preconditioners; the same one serves both sides.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        reason = self.converged_reasons[nm.sign(info)]
        output('%s convergence: %s (%s)' % (self.conf.method, info, reason))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # i_max/eps_a do not apply to PyAMG - only the relative tolerance.
        opts = Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8))
        return opts + common

    def __init__(self, conf, **kwargs):
        """
        Resolve the PyAMG solver factory; fall back to
        smoothed_aggregation_solver for unknown method names.
        """
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        # The hierarchy `mg` is built lazily; construct it now only when a
        # matrix was already handed to the constructor.
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        factory = getattr(pyamg, self.conf.method, None)
        if factory is None:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            factory = pyamg.smoothed_aggregation_solver
        self.solver = factory
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuilding the hierarchy is the expensive part - reuse it unless
        # the matrix object itself changed.
        if self.mg is None or mtx is not self.mtx:
            self.mg = self.solver(mtx)
            self.mtx = mtx
        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.
    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.
    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map the user-facing preconditioning side names to the PETSc PC side
    # constants (PC_LEFT=0, PC_RIGHT=1, PC_SYMMETRIC=2).
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        """
        Create and configure the PETSc KSP object (solver type,
        preconditioner type and side).
        """
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
        ksp = PETSc.KSP().create()
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp
        # Build a reverse map: PETSc reason code (int) -> reason name, for
        # readable convergence reports in __call__.
        # FIX: use .items() instead of the Python-2-only .iteritems(), so
        # the code also runs under Python 3 (works identically on 2.x).
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.items():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it in a PETSc AIJ matrix; also create
        compatible (uninitialized) solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)
        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve ``mtx * x = rhs`` with the configured KSP; tolerances not
        given explicitly default to the values in `self.conf`.
        """
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d
        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)
        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))
        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',
                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.
                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Inherit all common Krylov options (method, precond, tolerances)
        # from the sequential PETSc solver configuration.
        common = PETScKrylovSolver.process_conf(conf, kwargs)
        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
ensure_path(log_filename)
| output('storing system to %s...' % output_dir) | sfepy.base.base.output |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # NOTE(review): time.clock() measures CPU time on Unix and was
        # removed in Python 3.8 - consider time.time() on upgrade.
        tt = time.clock()
        # Fall back to the values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)
        # The matrix must be square and match the right-hand side size.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])
        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)
        ttt = time.clock() - tt
        if status is not None:
            # Report the elapsed time to the caller.
            status['time'] = ttt
        return result
    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy: SuperLU, or UMFPACK when available.
    """
    name = 'ls.scipy_direct'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # Direct solvers have no iteration/tolerance controls - fix them
        # to None so the common Struct interface stays uniform.
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common
    def __init__(self, conf, **kwargs):
        """
        Locate the scipy direct-solver module and optional UMFPACK
        bindings, select the backend, and optionally prefactorize.
        """
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None
        # The sparse direct solvers moved across scipy versions - try the
        # known module paths in order, oldest first.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        # UMFPACK is optional; look for it in scipy and scikits.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']
        if um is not None:
            # A stub module may import fine - check for the real API.
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False
        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)
        # 'auto' and 'umfpack' both use UMFPACK when it is present.
        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize once; __call__ then only back-substitutes.
                self.solve = self.sls.factorized(self.mtx)
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)
    def _presolve(self):
        # An instance attribute `presolve` overrides the configuration.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use
    ScipyDirect instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK branch of ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.
    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.
    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # eps_a is fixed to None: the SciPy iterative solvers take a single
        # `tol` argument, which is passed eps_r in __call__.
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    def __init__(self, conf, **kwargs):
        """
        Resolve the configured method name to a SciPy iterative solver
        function; fall back to `cg` when the name is unknown.
        """
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # Keyed by nm.sign(info) of the SciPy return code: info > 0 means
        # the iteration limit was reached, 0 success, < 0 breakdown.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve ``mtx * x = rhs``; arguments not given explicitly default to
        the values stored in `self.conf`.
        """
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)
        # qmr is the only SciPy solver taking two preconditioners (M1, M2);
        # the same preconditioner is used for both sides here.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}
        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        # The sign of `info` encodes the convergence reason, see __init__.
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))
        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.
    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        # i_max and eps_a are unused by PyAMG - only the relative
        # tolerance applies, see the class docstring.
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common
    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        """
        Resolve the configured PyAMG solver factory; fall back to
        smoothed_aggregation_solver for unknown names.
        """
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )
        # The multigrid hierarchy `mg` is built lazily; it is created here
        # only when a matrix was already passed to the constructor.
        LinearSolver.__init__(self, conf, mg=None, **kwargs)
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )
    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        # Rebuild the multigrid hierarchy only when the matrix object
        # changed (identity check) - building it is the expensive part.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx
        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.
    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.
    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map the user-facing preconditioning side names to the PETSc PC side
    # constants (PC_LEFT=0, PC_RIGHT=1, PC_SYMMETRIC=2).
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        """
        Create and configure the PETSc KSP object (solver type,
        preconditioner type and side).
        """
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )
        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
        ksp = PETSc.KSP().create()
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp
        # Build a reverse map: PETSc reason code (int) -> reason name, for
        # readable convergence reports in __call__.
        # FIX: use .items() instead of the Python-2-only .iteritems(), so
        # the code also runs under Python 3 (works identically on 2.x).
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.items():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it in a PETSc AIJ matrix; also create
        compatible (uninitialized) solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)
        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve ``mtx * x = rhs`` with the configured KSP; tolerances not
        given explicitly default to the values in `self.conf`.
        """
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d
        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)
        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))
        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.
        Example configuration, all items::
            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',
                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.
                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Inherit all common Krylov options (method, precond, tolerances)
        # from the sequential PETSc solver configuration.
        common = PETScKrylovSolver.process_conf(conf, kwargs)
        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
ensure_path(log_filename)
output('storing system to %s...' % output_dir)
tt = time.clock()
view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
pmtx.view(view_mtx)
prhs.view(view_rhs)
if sol0_filename:
view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
psol.view(view_sol0)
output('...done in %.2f s' % (time.clock() - tt))
command = [
'mpiexec -n %d' % self.conf.n_proc,
sys.executable, script_filename,
'-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
'-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
'-status %s' % status_filename,
'-ksp_type %s' % self.conf.method,
'-pc_type %s' % self.conf.precond,
'-sub_pc_type %s' % self.conf.sub_precond,
'-ksp_atol %.3e' % self.conf.eps_a,
'-ksp_rtol %.3e' % self.conf.eps_r,
'-ksp_max_it %d' % self.conf.i_max,
'-ksp_monitor %s' % log_filename,
'-ksp_view %s' % log_filename,
]
if self.conf.precond_side is not None:
command.append('-ksp_pc_side %s' % self.conf.precond_side)
out = os.system(" ".join(command))
| assert_(out == 0) | sfepy.base.base.assert_ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator for linear solver ``__call__()`` methods.

    Fills in missing call arguments from the solver instance, sanity-checks
    the system dimensions, times the wrapped solve and records the elapsed
    time in ``status['time']``, if a status dictionary is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        t0 = time.clock()

        # Fall back to the instance-level settings for anything not passed in.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The system matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        sol = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                   **kwargs)

        elapsed = time.clock() - t0
        if status is not None:
            status['time'] = elapsed

        return sol

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy - SuperLU by default, UMFPACK when
    available and requested.
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module moved several times across SciPy
        # versions - try the known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        # UMFPACK is usable only when the module exposes its status codes.
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize the matrix so repeated solves are cheap.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute, if set, overrides the configuration value.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as isolve

        LinearSolver.__init__(self, conf, **kwargs)

        # Look the requested solver up by name, falling back to cg.
        solver_fun = getattr(isolve, self.conf.method, None)
        if solver_fun is None:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            solver_fun = isolve.cg
        self.solver = solver_fun

        # SciPy encodes the convergence outcome in the sign of `info`.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # qmr takes two preconditioners (left/right) instead of a single M.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        # `mg` caches the multigrid hierarchy built from `self.mtx`.
        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Look the requested solver factory up by name, with a fallback.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the hierarchy eagerly when a matrix was already given.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the hierarchy only when the matrix object changed
        # (identity check, not a value comparison).
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map configuration names to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # One KSP object is created here and reused by all __call__() runs.
        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map PETSc integer reason codes to symbolic names for reporting.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR format, wrap it as a PETSc AIJ matrix and
        return it together with compatible solution and rhs vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)

        else:
            # The KSP object is shared by all calls - reset the flag possibly
            # set by a previous call with an initial guess.
            ksp.setInitialGuessNonzero(False)

        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Extend the serial PETSc solver options with parallel-run settings.
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
ensure_path(log_filename)
output('storing system to %s...' % output_dir)
tt = time.clock()
view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
pmtx.view(view_mtx)
prhs.view(view_rhs)
if sol0_filename:
view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
psol.view(view_sol0)
output('...done in %.2f s' % (time.clock() - tt))
command = [
'mpiexec -n %d' % self.conf.n_proc,
sys.executable, script_filename,
'-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
'-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
'-status %s' % status_filename,
'-ksp_type %s' % self.conf.method,
'-pc_type %s' % self.conf.precond,
'-sub_pc_type %s' % self.conf.sub_precond,
'-ksp_atol %.3e' % self.conf.eps_a,
'-ksp_rtol %.3e' % self.conf.eps_r,
'-ksp_max_it %d' % self.conf.i_max,
'-ksp_monitor %s' % log_filename,
'-ksp_view %s' % log_filename,
]
if self.conf.precond_side is not None:
command.append('-ksp_pc_side %s' % self.conf.precond_side)
out = os.system(" ".join(command))
assert_(out == 0)
| output('reading solution...') | sfepy.base.base.output |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
**kwargs)
ttt = time.clock() - tt
if status is not None:
status['time'] = ttt
return result
return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'auto'),
presolve=get('presolve', False),
warn=get('warn', True),
i_max=None, eps_a=None, eps_r=None) + common
def __init__(self, conf, **kwargs):
LinearSolver.__init__(self, conf, **kwargs)
um = self.sls = None
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
self.sls = aux['sls']
aux = try_imports(['import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
if 'um' in aux:
um = aux['um']
if um is not None:
is_umfpack = hasattr(um, 'UMFPACK_OK')
else:
is_umfpack = False
method = self.conf.method
if method == 'superlu':
self.sls.use_solver(useUmfpack=False)
elif method == 'umfpack':
if not is_umfpack and self.conf.warn:
output('umfpack not available, using superlu!')
elif method != 'auto':
raise ValueError('uknown solution method! (%s)' % method)
if method != 'superlu' and is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
self.solve = None
if self._presolve() and hasattr(self, 'mtx'):
if self.mtx is not None:
self.solve = self.sls.factorized(self.mtx)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def _presolve(self):
if hasattr(self, 'presolve'):
return self.presolve
else:
return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_110 = {
'name' : 'ls110',
'kind' : 'ls.scipy_iterative',
'method' : 'cg',
'precond' : None,
'callback' : None,
'i_max' : 1000,
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', None),
callback=get('callback', None),
i_max=get('i_max', 100),
eps_a=None,
eps_r=get('eps_r', 1e-8)) + common
def __init__(self, conf, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, **kwargs)
try:
solver = getattr( la, self.conf.method )
except AttributeError:
output( 'scipy solver %s does not exist!' % self.conf.method )
output( 'using cg instead' )
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
precond = get_default(kwargs.get('precond', None), self.conf.precond)
callback = get_default(kwargs.get('callback', None), self.conf.callback)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
callback=callback, **prec_args)
output('%s convergence: %s (%s)'
% (self.conf.method,
info, self.converged_reasons[nm.sign(info)]))
return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
"""
Interface to PyAMG solvers.
Notes
-----
Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
"""
name = 'ls.pyamg'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_102 = {
'name' : 'ls102',
'kind' : 'ls.pyamg',
'method' : 'smoothed_aggregation_solver',
'accel' : 'cg'
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'smoothed_aggregation_solver'),
accel = get('accel', None),
i_max=None, eps_a=None,
eps_r=get('eps_r', 1e-8)) + common
##
# c: 02.05.2008, r: 02.05.2008
def __init__( self, conf, **kwargs ):
try:
import pyamg
except ImportError:
msg = 'cannot import pyamg!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, mg=None, **kwargs)
try:
solver = getattr( pyamg, self.conf.method )
except AttributeError:
output( 'pyamg.%s does not exist!' % self.conf.method )
output( 'using pyamg.smoothed_aggregation_solver instead' )
solver = pyamg.smoothed_aggregation_solver
self.solver = solver
if hasattr( self, 'mtx' ):
if self.mtx is not None:
self.mg = self.solver( self.mtx )
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
if (self.mg is None) or (mtx is not self.mtx):
self.mg = self.solver(mtx)
self.mtx = mtx
sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
return sol
class PETScKrylovSolver( LinearSolver ):
"""
PETSc Krylov subspace solver.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc'
_precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_120 = {
'name' : 'ls120',
'kind' : 'ls.petsc',
'method' : 'cg', # ksp_type
'precond' : 'icc', # pc_type
'precond_side' : 'left', # ksp_pc_side
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', 'icc'),
precond_side=get('precond_side', None),
i_max=get('i_max', 100),
eps_a=get('eps_a', 1e-8),
eps_r=get('eps_r', 1e-8),
eps_d=get('eps_d', 1e5)) + common
def __init__( self, conf, **kwargs ):
try:
import petsc4py
petsc4py.init([])
from petsc4py import PETSc
except ImportError:
msg = 'cannot import petsc4py!'
raise ImportError( msg )
LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)
ksp = PETSc.KSP().create()
ksp.setType( self.conf.method )
ksp.getPC().setType( self.conf.precond )
side = self._precond_sides[self.conf.precond_side]
if side is not None:
ksp.setPCSide(side)
self.ksp = ksp
self.converged_reasons = {}
for key, val in ksp.ConvergedReason.__dict__.iteritems():
if isinstance(val, int):
self.converged_reasons[val] = key
def set_matrix( self, mtx ):
mtx = sps.csr_matrix(mtx)
pmtx = self.petsc.Mat().createAIJ( mtx.shape,
csr = (mtx.indptr,
mtx.indices,
mtx.data) )
sol, rhs = pmtx.getVecs()
return pmtx, sol, rhs
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
ksp.setInitialGuessNonzero(True)
prhs[...] = rhs
ksp.solve(prhs, psol)
sol = psol[...].copy()
output('%s(%s) convergence: %s (%s)'
% (self.conf.method, self.conf.precond,
ksp.reason, self.converged_reasons[ksp.reason]))
return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1 = {
'name' : 'ls',
'kind' : 'ls.petsc_parallel',
'log_dir' : '.', # Store logs here.
'n_proc' : 5, # Number of processes to run.
'method' : 'cg', # ksp_type
'precond' : 'bjacobi', # pc_type
'sub_precond' : 'icc', # sub_pc_type
'eps_a' : 1e-12, # abstol
'eps_r' : 1e-12, # rtol
'eps_d' : 1e5, # divtol
'i_max' : 1000, # maxits
}
"""
get = make_get_conf(conf, kwargs)
common = PETScKrylovSolver.process_conf(conf, kwargs)
return Struct(log_dir=get('log_dir', '.'),
n_proc=get('n_proc', 1),
sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
ensure_path(log_filename)
output('storing system to %s...' % output_dir)
tt = time.clock()
view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
pmtx.view(view_mtx)
prhs.view(view_rhs)
if sol0_filename:
view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
psol.view(view_sol0)
output('...done in %.2f s' % (time.clock() - tt))
command = [
'mpiexec -n %d' % self.conf.n_proc,
sys.executable, script_filename,
'-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
'-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
'-status %s' % status_filename,
'-ksp_type %s' % self.conf.method,
'-pc_type %s' % self.conf.precond,
'-sub_pc_type %s' % self.conf.sub_precond,
'-ksp_atol %.3e' % self.conf.eps_a,
'-ksp_rtol %.3e' % self.conf.eps_r,
'-ksp_max_it %d' % self.conf.i_max,
'-ksp_monitor %s' % log_filename,
'-ksp_view %s' % log_filename,
]
if self.conf.precond_side is not None:
command.append('-ksp_pc_side %s' % self.conf.precond_side)
out = os.system(" ".join(command))
assert_(out == 0)
output('reading solution...')
tt = time.clock()
view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
psol = petsc.Vec().load(view_sol)
fd = open(status_filename, 'r')
line = fd.readline().split()
reason = int(line[0])
elapsed = float(line[1])
fd.close()
output('...done in %.2f s' % (time.clock() - tt))
sol = psol[...].copy()
output('%s(%s, %s/proc) convergence: %s (%s)'
% (self.conf.method, self.conf.precond, self.conf.sub_precond,
reason, self.converged_reasons[reason]))
| output('elapsed: %.2f [s]' % elapsed) | sfepy.base.base.output |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
assert_(x0.shape[0] == rhs.shape[0])
result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
**kwargs)
ttt = time.clock() - tt
if status is not None:
status['time'] = ttt
return result
return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'auto'),
presolve=get('presolve', False),
warn=get('warn', True),
i_max=None, eps_a=None, eps_r=None) + common
def __init__(self, conf, **kwargs):
LinearSolver.__init__(self, conf, **kwargs)
um = self.sls = None
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
self.sls = aux['sls']
aux = try_imports(['import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
if 'um' in aux:
um = aux['um']
if um is not None:
is_umfpack = hasattr(um, 'UMFPACK_OK')
else:
is_umfpack = False
method = self.conf.method
if method == 'superlu':
self.sls.use_solver(useUmfpack=False)
elif method == 'umfpack':
if not is_umfpack and self.conf.warn:
output('umfpack not available, using superlu!')
elif method != 'auto':
raise ValueError('uknown solution method! (%s)' % method)
if method != 'superlu' and is_umfpack:
self.sls.use_solver(useUmfpack=True,
assumeSortedIndices=True)
self.solve = None
if self._presolve() and hasattr(self, 'mtx'):
if self.mtx is not None:
self.solve = self.sls.factorized(self.mtx)
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
if self.solve is not None:
# Matrix is already prefactorized.
return self.solve(rhs)
else:
return self.sls.spsolve(mtx, rhs)
def _presolve(self):
if hasattr(self, 'presolve'):
return self.presolve
else:
return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK back-end before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
"""
Interface to SciPy iterative solvers.
Notes
-----
The `eps_r` tolerance is both absolute and relative - the solvers
stop when either the relative or the absolute residual is below it.
A preconditioner can be anything that the SciPy solvers accept (sparse
matrix, dense matrix, LinearOperator).
"""
name = 'ls.scipy_iterative'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_110 = {
'name' : 'ls110',
'kind' : 'ls.scipy_iterative',
'method' : 'cg',
'precond' : None,
'callback' : None,
'i_max' : 1000,
'eps_r' : 1e-12,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'cg'),
precond=get('precond', None),
callback=get('callback', None),
i_max=get('i_max', 100),
eps_a=None,
eps_r=get('eps_r', 1e-8)) + common
def __init__(self, conf, **kwargs):
import scipy.sparse.linalg.isolve as la
LinearSolver.__init__(self, conf, **kwargs)
try:
solver = getattr( la, self.conf.method )
except AttributeError:
output( 'scipy solver %s does not exist!' % self.conf.method )
output( 'using cg instead' )
solver = la.cg
self.solver = solver
self.converged_reasons = {
0 : 'successful exit',
1 : 'number of iterations',
-1 : 'illegal input or breakdown',
}
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
precond = get_default(kwargs.get('precond', None), self.conf.precond)
callback = get_default(kwargs.get('callback', None), self.conf.callback)
if conf.method == 'qmr':
prec_args = {'M1' : precond, 'M2' : precond}
else:
prec_args = {'M' : precond}
sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
callback=callback, **prec_args)
output('%s convergence: %s (%s)'
% (self.conf.method,
info, self.converged_reasons[nm.sign(info)]))
return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            raise ImportError('cannot import pyamg!')

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Fall back to the default solver when the configured one is missing.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the hierarchy only when the matrix has changed.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Mapping of preconditioning side names to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map integer reason codes to symbolic names for readable output.
        # items() is used instead of iteritems() for Python 3 compatibility.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.items():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert a SciPy matrix to a PETSc AIJ matrix and create compatible
        solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        # Pass the *resolved* tolerances (which honour per-call overrides)
        # to the worker script - previously the raw configuration values
        # were used, silently ignoring the eps_a/eps_r/i_max arguments.
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % eps_a,
            '-ksp_rtol %.3e' % eps_r,
            '-ksp_max_it %d' % i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        output('reading solution...')
        tt = time.clock()
        view_sol = petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
r"""
Generalized Schur complement.
Defines the matrix blocks and calls user defined function.
"""
name = 'ls.schur_generalized'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.schur_generalized',
{'blocks':
{'u': ['displacement1', 'displacement2'],
'v': ['velocity1', 'velocity2'],
'w': ['pressure1', 'pressure2'],
},
'function': my_schur,
'needs_problem_instance': True,
})
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing arguments from the solver instance, checks the system
    shapes, times the wrapped call and stores the elapsed time in
    `status['time']`, if `status` is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        start = time.clock()

        # Fall back to the values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - start
        if status is not None:
            status['time'] = elapsed

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK if available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (set via kwargs) takes precedence over conf.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method regardless of the configured one.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get_opt = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        # eps_a is not supported by the SciPy iterative solvers.
        solver_opts = Struct(method=get_opt('method', 'cg'),
                             precond=get_opt('precond', None),
                             callback=get_opt('callback', None),
                             i_max=get_opt('i_max', 100),
                             eps_a=None,
                             eps_r=get_opt('eps_r', 1e-8))

        return solver_opts + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Fall back to CG when the configured method is not available.
        solver = getattr(la, self.conf.method, None)
        if solver is None:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            solver = la.cg
        self.solver = solver

        # Human-readable meanings of the iterative solver exit codes.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioners, the remaining solvers a single one.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            raise ImportError('cannot import pyamg!')

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Fall back to the default solver when the configured one is missing.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the hierarchy only when the matrix has changed.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Mapping of preconditioning side names to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map integer reason codes to symbolic names for readable output.
        # items() is used instead of iteritems() for Python 3 compatibility.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.items():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert a SciPy matrix to a PETSc AIJ matrix and create compatible
        solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        # Pass the *resolved* tolerances (which honour per-call overrides)
        # to the worker script - previously the raw configuration values
        # were used, silently ignoring the eps_a/eps_r/i_max arguments.
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % eps_a,
            '-ksp_rtol %.3e' % eps_r,
            '-ksp_max_it %d' % i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        output('reading solution...')
        tt = time.clock()
        view_sol = petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
r"""
Generalized Schur complement.
Defines the matrix blocks and calls user defined function.
"""
name = 'ls.schur_generalized'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.schur_generalized',
{'blocks':
{'u': ['displacement1', 'displacement2'],
'v': ['velocity1', 'velocity2'],
'w': ['pressure1', 'pressure2'],
},
'function': my_schur,
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(blocks=get('blocks', None,
'missing "blocks" in options!'),
function=get('function', None,
'missing "function" in options!'),
needs_problem_instance=True) + common
def __init__(self, conf, **kwargs):
from sfepy.discrete.state import State
ScipyDirect.__init__(self, conf, **kwargs)
equations = self.problem.equations
aux_state = | State(equations.variables) | sfepy.discrete.state.State |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing arguments from the solver instance, checks the system
    shapes, times the wrapped call and stores the elapsed time in
    `status['time']`, if `status` is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        start = time.clock()

        # Fall back to the values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - start
        if status is not None:
            status['time'] = elapsed

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK if available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the error message ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute (set via kwargs) takes precedence over conf.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'
    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method regardless of the configured one.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get_opt = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        # eps_a is not supported by the SciPy iterative solvers.
        solver_opts = Struct(method=get_opt('method', 'cg'),
                             precond=get_opt('precond', None),
                             callback=get_opt('callback', None),
                             i_max=get_opt('i_max', 100),
                             eps_a=None,
                             eps_r=get_opt('eps_r', 1e-8))

        return solver_opts + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Fall back to CG when the configured method is not available.
        solver = getattr(la, self.conf.method, None)
        if solver is None:
            output('scipy solver %s does not exist!' % self.conf.method)
            output('using cg instead')
            solver = la.cg
        self.solver = solver

        # Human-readable meanings of the iterative solver exit codes.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioners, the remaining solvers a single one.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver(LinearSolver):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel=get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        try:
            import pyamg
        except ImportError:
            raise ImportError('cannot import pyamg!')

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Fall back to the default solver when the configured one is missing.
        solver = getattr(pyamg, self.conf.method, None)
        if solver is None:
            output('pyamg.%s does not exist!' % self.conf.method)
            output('using pyamg.smoothed_aggregation_solver instead')
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy right away if a matrix is available.
        if getattr(self, 'mtx', None) is not None:
            self.mg = self.solver(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the hierarchy only when the matrix has changed.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        return self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Mapping of preconditioning side names to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map integer reason codes to symbolic names for readable output.
        # items() is used instead of iteritems() for Python 3 compatibility.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.items():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert a SciPy matrix to a PETSc AIJ matrix and create compatible
        solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',
                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.
                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        # Effective tolerances: call-time arguments override configuration.
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        # Serialize the system for the worker script.
        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            # Pass the *effective* tolerances (honouring call-time
            # overrides), not the raw configuration values - otherwise the
            # eps_a/eps_r/i_max arguments of this call would be ignored by
            # the external solve.
            '-ksp_atol %.3e' % eps_a,
            '-ksp_rtol %.3e' % eps_r,
            '-ksp_max_it %d' % i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        # Read back the worker's solution and convergence status.
        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        # Remove the temporary directory holding the serialized system.
        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)

        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State

        ScipyDirect.__init__(self, conf, **kwargs)

        equations = self.problem.equations
        aux_state = State(equations.variables)

        # For each block, mark its variables' DOFs with NaN in a zeroed
        # state vector; after applying EBCs and reducing, the positions of
        # the NaNs give the block's indices in the reduced system.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan

            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Reduced-system DOF indices per block, computed in __init__().
        mtxi= self.orig_conf.idxs
        mtxslc_s = {}  # slices into the packed per-block vectors
        mtxslc_f = {}  # corresponding slices into the full system vector
        nn = {}

        # Decompose each block's index array into contiguous runs so data
        # can be copied with plain slices instead of fancy indexing.
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                # Longest run of consecutive indices starting at ptr.
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1

        # Gather the dense sub-matrices and sub-vectors for every block
        # pair; mtxs['uv'] couples rows of block 'u' with columns of 'v'.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # Delegate the actual solve to the user-supplied function, which
        # fills `ress` in place.
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter the per-block solutions back into a full-size vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # Prefer an instance-level `presolve` attribute over the
        # configured option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
r"""
Schur complement.
Solution of the linear system
.. math::
\left[ \begin{array}{cc}
A & B \\
C & D \end{array} \right]
\cdot
\left[ \begin{array}{c}
u \\
v \end{array} \right]
=
\left[ \begin{array}{c}
f \\
g \end{array} \right]
is obtained by solving the following equation:
.. math::
(D - C A^{-1} B) \cdot v = g - C A^{-1} f
variable(s) :math:`u` are specified in "eliminate" list,
variable(s) :math:`v` are specified in "keep" list,
See: http://en.wikipedia.org/wiki/Schur_complement
"""
name = 'ls.schur_complement'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.schur_complement',
{'eliminate': ['displacement'],
'keep': ['pressure'],
'needs_problem_instance': True,
})
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Resolves `conf`, `mtx` and `status` from the solver instance when not
    passed explicitly, checks that the matrix is square and compatible
    with the right-hand side (and initial guess, if given), and stores the
    elapsed time (via `time.clock`) in ``status['time']`` when `status`
    is provided.
    """
    from functools import wraps

    # functools.wraps preserves the decorated solver's __name__ and
    # __doc__, which the plain closure would otherwise clobber.
    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = time.clock()

        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']

        # UMFPACK is optional; probe the known import locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed error-message typo ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix for repeated solves.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # Prefer an instance-level `presolve` attribute over the
        # configured option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything else to
        # ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Fall back to CG when the configured method is not provided by
        # SciPy.
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver

        # Meaning of the `info` value returned by SciPy iterative solvers,
        # keyed by its sign.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioners (left and right); pass the same one
        # to both. All other solvers take a single `M` argument.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}

        else:
            prec_args = {'M' : precond}

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Fall back to smoothed aggregation when the configured method is
        # not provided by pyamg.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy eagerly when a matrix was passed
        # at construction time.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # (Re)create the multigrid hierarchy only when the matrix object
        # changes (identity check - the hierarchy is cached per matrix).
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map user-level preconditioner side names to PETSc PCSide values.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # The KSP object, its type and preconditioner are created once here;
        # only tolerances can be changed per call.
        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Reverse map: PETSc integer convergence reason -> symbolic name,
        # used for human-readable reporting in __call__().
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert a SciPy sparse matrix into a PETSc AIJ matrix and create
        matching solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Call-time arguments override the configured tolerances.
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            # Start the iteration from x0 instead of the zero vector.
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',
                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.
                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        # Effective tolerances: call-time arguments override configuration.
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        # Serialize the system for the worker script.
        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            # Pass the *effective* tolerances (honouring call-time
            # overrides), not the raw configuration values - otherwise the
            # eps_a/eps_r/i_max arguments of this call would be ignored by
            # the external solve.
            '-ksp_atol %.3e' % eps_a,
            '-ksp_rtol %.3e' % eps_r,
            '-ksp_max_it %d' % i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        # Read back the worker's solution and convergence status.
        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        # Remove the temporary directory holding the serialized system.
        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)

        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State

        ScipyDirect.__init__(self, conf, **kwargs)

        equations = self.problem.equations
        aux_state = State(equations.variables)

        # For each block, mark its variables' DOFs with NaN in a zeroed
        # state vector; after applying EBCs and reducing, the positions of
        # the NaNs give the block's indices in the reduced system.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan

            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Reduced-system DOF indices per block, computed in __init__().
        mtxi= self.orig_conf.idxs
        mtxslc_s = {}  # slices into the packed per-block vectors
        mtxslc_f = {}  # corresponding slices into the full system vector
        nn = {}

        # Decompose each block's index array into contiguous runs so data
        # can be copied with plain slices instead of fancy indexing.
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                # Longest run of consecutive indices starting at ptr.
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1

        # Gather the dense sub-matrices and sub-vectors for every block
        # pair; mtxs['uv'] couples rows of block 'u' with columns of 'v'.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # Delegate the actual solve to the user-supplied function, which
        # fills `ress` in place.
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter the per-block solutions back into a full-size vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # Prefer an instance-level `presolve` attribute over the
        # configured option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)

        # Translate "eliminate"/"keep" into the generic block definition
        # ('1' = eliminated, '2' = kept) expected by SchurGeneralized.
        conf.blocks = {'1': get('eliminate', None,
                                'missing "eliminate" in options!'),
                       '2': get('keep', None,
                                'missing "keep" in options!'),}
        conf.function = SchurComplement.schur_fun
        common = SchurGeneralized.process_conf(conf, kwargs)

        return common

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Solve the system via the Schur complement of block '11'.

        Blocks are keyed '11' = A, '12' = B, '21' = C, '22' = D; `rhs['1']`
        = f, `rhs['2']` = g. Results are stored in-place in `res`.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # Factorize A once; apply A^{-1} column-wise to B and once to f.
        invA = sls.splu(scs.csc_matrix(mtx['11']))

        invAB = nm.zeros_like(mtx['12'])
        for j, b in enumerate(mtx['12'].T):
            invAB[:,j] = invA.solve(b)

        invAf = invA.solve(rhs['1'])

        spC = scs.csc_matrix(mtx['21'])
        k_rhs = rhs['2'] - spC * invAf
        # Schur complement solve for the kept variables v...
        res['2'] = sls.spsolve(scs.csc_matrix(mtx['22'] - spC * invAB), k_rhs)
        # ...then back-substitution for the eliminated variables u.
        res['1'] = invAf - nm.dot(invAB, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.cm_pb',
{'others': ['acoustic_subproblem.py'],
'coupling_variables': ['g'],
'needs_problem_instance': True,
})
}
"""
get = | make_get_conf(conf, kwargs) | sfepy.solvers.solvers.make_get_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Resolves `conf`, `mtx` and `status` from the solver instance when not
    passed explicitly, checks that the matrix is square and compatible
    with the right-hand side (and initial guess, if given), and stores the
    elapsed time (via `time.clock`) in ``status['time']`` when `status`
    is provided.
    """
    from functools import wraps

    # functools.wraps preserves the decorated solver's __name__ and
    # __doc__, which the plain closure would otherwise clobber.
    @wraps(call)
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = time.clock()

        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',
                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']

        # UMFPACK is optional; probe the known import locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed error-message typo ('uknown' -> 'unknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix for repeated solves.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # Prefer an instance-level `presolve` attribute over the
        # configured option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything else to
        # ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Fall back to CG when the configured method is not provided by
        # SciPy.
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver

        # Meaning of the `info` value returned by SciPy iterative solvers,
        # keyed by its sign.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioners (left and right); pass the same one
        # to both. All other solvers take a single `M` argument.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}

        else:
            prec_args = {'M' : precond}

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy eagerly when a matrix was passed in.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Use the resolved `conf` so per-call overrides take effect
        # (previously self.conf was consulted, ignoring such overrides).
        eps_r = get_default(eps_r, conf.eps_r)

        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map configuration strings to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # The KSP type and preconditioner are fixed here, at creation time.
        ksp = PETSc.KSP().create()
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map PETSc integer reason codes to their symbolic names.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """Convert `mtx` to a PETSc AIJ matrix; return it with work vectors."""
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Use the resolved `conf` so per-call tolerance overrides take effect
        # (previously self.conf was consulted, ignoring such overrides).
        eps_a = get_default(eps_a, conf.eps_a)
        eps_r = get_default(eps_r, conf.eps_r)
        i_max = get_default(i_max, conf.i_max)
        eps_d = conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()

        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        # Use the resolved `conf` so per-call overrides take effect.
        eps_a = get_default(eps_a, conf.eps_a)
        eps_r = get_default(eps_r, conf.eps_r)
        i_max = get_default(i_max, conf.i_max)
        eps_d = conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        # Pass the *resolved* tolerances and iteration limit to the worker so
        # that per-call overrides of eps_a, eps_r and i_max take effect
        # (previously the values from self.conf were always used here).
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % eps_a,
            '-ksp_rtol %.3e' % eps_r,
            '-ksp_max_it %d' % i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)

        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State

        ScipyDirect.__init__(self, conf, **kwargs)

        equations = self.problem.equations
        aux_state = State(equations.variables)

        # For each named block, mark the DOFs of its variables with NaN in
        # the full DOF vector, apply the essential BCs, and locate the marks
        # in the reduced vector - these positions are the block's row/column
        # indices in the reduced (stripped) system matrix.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan

            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Block DOF indices computed in __init__().
        mtxi= self.orig_conf.idxs
        mtxslc_s = {}  # slices into the block-local vectors
        mtxslc_f = {}  # corresponding slices into the full reduced system
        nn = {}  # number of DOFs per block

        # Decompose each (sorted) index array into runs of consecutive
        # indices, recording matching local (mtxslc_s) and global
        # (mtxslc_f) slices for each run.
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1

        # Gather the dense sub-matrices and right-hand side pieces of all
        # block pairs into dictionaries keyed by block names.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                # Key like 'uv' for the (u rows, v columns) sub-matrix.
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # The user-defined function solves the block system in-place (ress).
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter the block solutions back into the full solution vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # An instance attribute, if present, overrides the config option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)

        # Map the user-facing options onto the generalized two-block form.
        conf.blocks = {'1' : get('eliminate', None,
                                 'missing "eliminate" in options!'),
                       '2' : get('keep', None,
                                 'missing "keep" in options!')}
        conf.function = SchurComplement.schur_fun

        return SchurGeneralized.process_conf(conf, kwargs)

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Eliminate block '1' and solve the Schur complement system for the
        block '2' unknowns; then back-substitute for block '1'.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # LU factorization of A, reused for all solves with A.
        lu_a = sls.splu(scs.csc_matrix(mtx['11']))

        # A^{-1} B, computed one column at a time.
        a_inv_b = nm.zeros_like(mtx['12'])
        for jc in range(mtx['12'].shape[1]):
            a_inv_b[:, jc] = lu_a.solve(mtx['12'][:, jc])

        # A^{-1} f.
        a_inv_f = lu_a.solve(rhs['1'])

        mtx_c = scs.csc_matrix(mtx['21'])

        # Solve (D - C A^{-1} B) v = g - C A^{-1} f.
        schur_mtx = scs.csc_matrix(mtx['22'] - mtx_c * a_inv_b)
        res['2'] = sls.spsolve(schur_mtx, rhs['2'] - mtx_c * a_inv_f)

        # u = A^{-1} f - A^{-1} B v.
        res['1'] = a_inv_f - nm.dot(a_inv_b, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.cm_pb',
{'others': ['acoustic_subproblem.py'],
'coupling_variables': ['g'],
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(others=get('others', None,
'missing "others" in options!'),
coupling_variables=get('coupling_variables', None,
'missing "coupling_variables"!'),
needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = | get_standard_keywords() | sfepy.base.conf.get_standard_keywords |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator that resolves default arguments, checks vector/matrix shape
    compatibility and records the elapsed time of a linear solver call in
    ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        t0 = time.clock()

        # Fall back to the instance-level defaults.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The system must be square and consistent with the rhs (and x0).
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        if status is not None:
            status['time'] = time.clock() - t0

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']

        # UMFPACK is optional - look for it in all known locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            # Fixed typo in the original message ('uknown').
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        # Pre-factorize the matrix when requested and available.
        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # An instance attribute, if present, overrides the config option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """
    Deprecated alias kept for compatibility with old input files; use
    ScipyDirect instead.
    """
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything else.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver

        # Human-readable meanings of sign(info) returned by the solvers.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Use the resolved `conf` so per-call overrides take effect
        # (previously self.conf was consulted, ignoring such overrides).
        eps_r = get_default(eps_r, conf.eps_r)
        i_max = get_default(i_max, conf.i_max)

        precond = get_default(kwargs.get('precond', None), conf.precond)
        callback = get_default(kwargs.get('callback', None), conf.callback)

        if conf.method == 'qmr':
            # QMR accepts two preconditioners; use the same one for both.
            prec_args = {'M1' : precond, 'M2' : precond}

        else:
            prec_args = {'M' : precond}

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy eagerly when a matrix was passed in.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Use the resolved `conf` so per-call overrides take effect
        # (previously self.conf was consulted, ignoring such overrides).
        eps_r = get_default(eps_r, conf.eps_r)

        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map configuration strings to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # The KSP type and preconditioner are fixed here, at creation time.
        ksp = PETSc.KSP().create()
        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Map PETSc integer reason codes to their symbolic names.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """Convert `mtx` to a PETSc AIJ matrix; return it with work vectors."""
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Use the resolved `conf` so per-call tolerance overrides take effect
        # (previously self.conf was consulted, ignoring such overrides).
        eps_a = get_default(eps_a, conf.eps_a)
        eps_r = get_default(eps_r, conf.eps_r)
        i_max = get_default(i_max, conf.i_max)
        eps_d = conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()

        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        # Use the resolved `conf` so per-call overrides take effect.
        eps_a = get_default(eps_a, conf.eps_a)
        eps_r = get_default(eps_r, conf.eps_r)
        i_max = get_default(i_max, conf.i_max)
        eps_d = conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        # Pass the *resolved* tolerances and iteration limit to the worker so
        # that per-call overrides of eps_a, eps_r and i_max take effect
        # (previously the values from self.conf were always used here).
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % eps_a,
            '-ksp_rtol %.3e' % eps_r,
            '-ksp_max_it %d' % i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)

        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State

        ScipyDirect.__init__(self, conf, **kwargs)

        equations = self.problem.equations
        aux_state = State(equations.variables)

        # For each named block, mark the DOFs of its variables with NaN in
        # the full DOF vector, apply the essential BCs, and locate the marks
        # in the reduced vector - these positions are the block's row/column
        # indices in the reduced (stripped) system matrix.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan

            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        # Block DOF indices computed in __init__().
        mtxi= self.orig_conf.idxs
        mtxslc_s = {}  # slices into the block-local vectors
        mtxslc_f = {}  # corresponding slices into the full reduced system
        nn = {}  # number of DOFs per block

        # Decompose each (sorted) index array into runs of consecutive
        # indices, recording matching local (mtxslc_s) and global
        # (mtxslc_f) slices for each run.
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1

        # Gather the dense sub-matrices and right-hand side pieces of all
        # block pairs into dictionaries keyed by block names.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                # Key like 'uv' for the (u rows, v columns) sub-matrix.
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # The user-defined function solves the block system in-place (ress).
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter the block solutions back into the full solution vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # An instance attribute, if present, overrides the config option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)

        # Map the user-facing options onto the generalized two-block form.
        conf.blocks = {'1' : get('eliminate', None,
                                 'missing "eliminate" in options!'),
                       '2' : get('keep', None,
                                 'missing "keep" in options!')}
        conf.function = SchurComplement.schur_fun

        return SchurGeneralized.process_conf(conf, kwargs)

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Eliminate block '1' and solve the Schur complement system for the
        block '2' unknowns; then back-substitute for block '1'.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # LU factorization of A, reused for all solves with A.
        lu_a = sls.splu(scs.csc_matrix(mtx['11']))

        # A^{-1} B, computed one column at a time.
        a_inv_b = nm.zeros_like(mtx['12'])
        for jc in range(mtx['12'].shape[1]):
            a_inv_b[:, jc] = lu_a.solve(mtx['12'][:, jc])

        # A^{-1} f.
        a_inv_f = lu_a.solve(rhs['1'])

        mtx_c = scs.csc_matrix(mtx['21'])

        # Solve (D - C A^{-1} B) v = g - C A^{-1} f.
        schur_mtx = scs.csc_matrix(mtx['22'] - mtx_c * a_inv_b)
        res['2'] = sls.spsolve(schur_mtx, rhs['2'] - mtx_c * a_inv_f)

        # u = A^{-1} f - A^{-1} B v.
        res['1'] = a_inv_f - nm.dot(a_inv_b, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
    """
    Setup solver configuration options.

    Example configuration::

        solvers = {
            'ls': ('ls.cm_pb',
                   {'others': ['acoustic_subproblem.py'],
                    'coupling_variables': ['g'],
                    'needs_problem_instance': True,
                   })
        }

    Here 'others' lists the definition files of the subproblems and
    'coupling_variables' names the variables shared between them.
    """
    get = make_get_conf(conf, kwargs)
    common = ScipyDirect.process_conf(conf, kwargs)

    # The problem instance is always needed to set up the subproblems.
    return Struct(others=get('others', None,
                             'missing "others" in options!'),
                  coupling_variables=get('coupling_variables', None,
                                         'missing "coupling_variables"!'),
                  needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = | output.get_output_prefix() | sfepy.base.base.output.get_output_prefix |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
"""
Decorator handling argument preparation and timing for linear solvers.
"""
def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
tt = time.clock()
conf = get_default(conf, self.conf)
mtx = get_default(mtx, self.mtx)
status = get_default(status, self.status)
assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
if x0 is not None:
| assert_(x0.shape[0] == rhs.shape[0]) | sfepy.base.base.assert_ |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Missing arguments are taken from the solver instance, the system
    dimensions are verified and the elapsed time is stored in `status`.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        t0 = time.clock()

        # Fall back to the values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        sol = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                   **kwargs)

        elapsed = time.clock() - t0
        if status is not None:
            status['time'] = elapsed

        return sol

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module moved several times across scipy
        # versions - probe the known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        # UMFPACK is usable only when a real wrapper module was imported.
        is_umfpack = (um is not None) and hasattr(um, 'UMFPACK_OK')

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize so that repeated calls only back-substitute.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        """
        Return the presolve flag; an instance attribute takes precedence
        over the configuration value.
        """
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative(LinearSolver):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Fill in default values for missing configuration items.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        options = Struct(method=get('method', 'cg'),
                         precond=get('precond', None),
                         callback=get('callback', None),
                         i_max=get('i_max', 100),
                         eps_a=None,
                         eps_r=get('eps_r', 1e-8))
        return options + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Look the requested method up in scipy; unknown names fall
        # back to conjugate gradients.
        method = self.conf.method
        if hasattr(la, method):
            self.solver = getattr(la, method)
        else:
            output( 'scipy solver %s does not exist!' % method )
            output( 'using cg instead' )
            self.solver = la.cg

        # Meaning of the sign of the info code returned by the solver.
        self.converged_reasons = {
            -1 : 'illegal input or breakdown',
            0 : 'successful exit',
            1 : 'number of iterations',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioners; pass the same one on both sides.
        prec_args = ({'M1' : precond, 'M2' : precond}
                     if conf.method == 'qmr' else {'M' : precond})

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Unknown method names fall back to smoothed aggregation AMG.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy now if a matrix is already known.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # (Re)build the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Mapping of user-facing names to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # Create and configure the Krylov solver object once; it is
        # reused for all subsequent solves.
        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Build a reverse map: integer reason code -> reason name.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it into a PETSc AIJ matrix; return
        the matrix and compatible solution/right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Reuse all options of the sequential PETSc solver.
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # All data files are exchanged through a fresh temporary directory,
        # removed at the end of the call.
        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')
        else:
            sol0_filename = ''
        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        # Phase 1: store the system in PETSc binary format.
        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        # Phase 2: run the worker script under mpiexec; the solver options
        # are passed on its command line.
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % self.conf.eps_a,
            '-ksp_rtol %.3e' % self.conf.eps_r,
            '-ksp_max_it %d' % self.conf.i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        # Phase 3: read the solution and the worker status back.
        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        # The status file holds the convergence reason and elapsed time.
        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                       })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)

        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State

        ScipyDirect.__init__(self, conf, **kwargs)

        equations = self.problem.equations
        aux_state = State(equations.variables)

        # For each named block, find the reduced-vector indices of its
        # variables: mark the variable DOFs with NaN, apply the boundary
        # conditions and look the NaNs up in the reduced vector.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan

            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        mtxi= self.orig_conf.idxs
        mtxslc_s = {}
        mtxslc_f = {}
        nn = {}

        # Split each block's index list into contiguous runs, stored as
        # paired slices: mtxslc_s into the block-local vector, mtxslc_f
        # into the full vector. Assumes the indices are sorted.
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1

        # Assemble dense matrix blocks keyed by row/column block names
        # (e.g. 'uv') and the matching right-hand side parts.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        # NOTE(review): relies on the private scipy
                        # _get_submatrix() API - verify on scipy upgrades.
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # The user function solves the block system in-place into ress.
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter the block solutions back into a full-size vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        """
        Return the presolve flag; an instance attribute takes precedence
        over the configuration value.
        """
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                       })
            }
        """
        get = make_get_conf(conf, kwargs)

        # Map user options onto the generic block names used by
        # SchurGeneralized: block '1' is eliminated, block '2' is kept.
        conf.blocks = {'1': get('eliminate', None,
                                'missing "eliminate" in options!'),
                       '2': get('keep', None,
                                'missing "keep" in options!'),}
        conf.function = SchurComplement.schur_fun
        common = SchurGeneralized.process_conf(conf, kwargs)

        return common

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Solve the block system by eliminating the '1' block unknowns.

        The solutions are stored in-place into ``res['1']``, ``res['2']``;
        `mtx` holds the dense blocks keyed '11', '12', '21', '22' and
        `rhs` the matching right-hand side parts. `nn` (block sizes) is
        unused here, but belongs to the user-function signature.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # Sparse LU factorization of A = mtx['11'], reused for all solves.
        invA = sls.splu(scs.csc_matrix(mtx['11']))

        # Dense A^{-1} B, built column by column.
        invAB = nm.zeros_like(mtx['12'])
        for j, b in enumerate(mtx['12'].T):
            invAB[:,j] = invA.solve(b)

        # A^{-1} f.
        invAf = invA.solve(rhs['1'])

        # Solve the Schur complement system (D - C A^{-1} B) v = g - C A^{-1} f.
        spC = scs.csc_matrix(mtx['21'])
        k_rhs = rhs['2'] - spC * invAf
        res['2'] = sls.spsolve(scs.csc_matrix(mtx['22'] - spC * invAB), k_rhs)

        # Back-substitution: u = A^{-1} f - A^{-1} B v.
        res['1'] = invAf - nm.dot(invAB, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
    """
    Setup solver configuration options.

    Example configuration::

        solvers = {
            'ls': ('ls.cm_pb',
                   {'others': ['acoustic_subproblem.py'],
                    'coupling_variables': ['g'],
                    'needs_problem_instance': True,
                   })
        }

    Here 'others' lists the definition files of the subproblems and
    'coupling_variables' names the variables shared between them.
    """
    get = make_get_conf(conf, kwargs)
    common = ScipyDirect.process_conf(conf, kwargs)

    # The problem instance is always needed to set up the subproblems.
    return Struct(others=get('others', None,
                             'missing "others" in options!'),
                  coupling_variables=get('coupling_variables', None,
                                         'missing "coupling_variables"!'),
                  needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
| output.set_output_prefix(sub_prefix) | sfepy.base.base.output.set_output_prefix |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        tt = time.clock()

        # Fall back to the values stored on the solver instance.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and compatible with the vectors.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        # Record the solve duration in the status, if given.
        ttt = time.clock() - tt
        if status is not None:
            status['time'] = ttt

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK when available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # The sparse direct solver module moved several times across scipy
        # versions - probe the known locations.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        # UMFPACK is usable only when a real wrapper module was imported.
        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('uknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Pre-factorize so that repeated calls only back-substitute.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        """
        Return the presolve flag; an instance attribute takes precedence
        over the configuration value.
        """
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method before delegating to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Look the requested method up in scipy; unknown names fall
        # back to conjugate gradients.
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver

        # Meaning of the sign of the info code returned by the solver.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioners; pass the same one on both sides.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Unknown method names fall back to smoothed aggregation AMG.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        # Build the multigrid hierarchy now if a matrix is already known.
        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # (Re)build the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Mapping of user-facing names to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # Create and configure the Krylov solver object once; it is
        # reused for all subsequent solves.
        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Build a reverse map: integer reason code -> reason name.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert `mtx` to CSR and wrap it into a PETSc AIJ matrix; return
        the matrix and compatible solution/right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
"""
PETSc Krylov subspace solver able to run in parallel by storing the
system to disk and running a separate script via `mpiexec`.
The solver and preconditioner types are set upon the solver object
creation. Tolerances can be overriden when called by passing a `conf`
object.
Notes
-----
Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
where, in PETSc, `rnorm` is by default the norm of *preconditioned*
residual.
"""
name = 'ls.petsc_parallel'
@staticmethod
def process_conf(conf, kwargs):
    """
    Missing items are set to default values.

    Example configuration, all items::

        solver_1 = {
            'name' : 'ls',
            'kind' : 'ls.petsc_parallel',

            'log_dir' : '.', # Store logs here.
            'n_proc' : 5, # Number of processes to run.

            'method' : 'cg', # ksp_type
            'precond' : 'bjacobi', # pc_type
            'sub_precond' : 'icc', # sub_pc_type
            'eps_a' : 1e-12, # abstol
            'eps_r' : 1e-12, # rtol
            'eps_d' : 1e5, # divtol
            'i_max' : 1000, # maxits
        }
    """
    get = make_get_conf(conf, kwargs)
    # Reuse all options of the sequential PETSc solver.
    common = PETScKrylovSolver.process_conf(conf, kwargs)

    return Struct(log_dir=get('log_dir', '.'),
                  n_proc=get('n_proc', 1),
                  sub_precond=get('sub_precond', 'icc')) + common
@standard_call
def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
i_max=None, mtx=None, status=None, **kwargs):
import os, sys, shutil, tempfile
from sfepy import base_dir
from sfepy.base.ioutils import ensure_path
eps_a = get_default(eps_a, self.conf.eps_a)
eps_r = get_default(eps_r, self.conf.eps_r)
i_max = get_default(i_max, self.conf.i_max)
eps_d = self.conf.eps_d
petsc = self.petsc
# There is no use in caching matrix in the solver - always set as new.
pmtx, psol, prhs = self.set_matrix(mtx)
ksp = self.ksp
ksp.setOperators(pmtx)
ksp.setFromOptions() # PETSc.Options() not used yet...
ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
output_dir = tempfile.mkdtemp()
# Set PETSc rhs, solve, get solution from PETSc solution.
if x0 is not None:
psol[...] = x0
sol0_filename = os.path.join(output_dir, 'sol0.dat')
else:
sol0_filename = ''
prhs[...] = rhs
script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
mtx_filename = os.path.join(output_dir, 'mtx.dat')
rhs_filename = os.path.join(output_dir, 'rhs.dat')
sol_filename = os.path.join(output_dir, 'sol.dat')
status_filename = os.path.join(output_dir, 'status.txt')
log_filename = os.path.join(self.conf.log_dir, 'sol.log')
ensure_path(log_filename)
output('storing system to %s...' % output_dir)
tt = time.clock()
view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
pmtx.view(view_mtx)
prhs.view(view_rhs)
if sol0_filename:
view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
psol.view(view_sol0)
output('...done in %.2f s' % (time.clock() - tt))
command = [
'mpiexec -n %d' % self.conf.n_proc,
sys.executable, script_filename,
'-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
'-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
'-status %s' % status_filename,
'-ksp_type %s' % self.conf.method,
'-pc_type %s' % self.conf.precond,
'-sub_pc_type %s' % self.conf.sub_precond,
'-ksp_atol %.3e' % self.conf.eps_a,
'-ksp_rtol %.3e' % self.conf.eps_r,
'-ksp_max_it %d' % self.conf.i_max,
'-ksp_monitor %s' % log_filename,
'-ksp_view %s' % log_filename,
]
if self.conf.precond_side is not None:
command.append('-ksp_pc_side %s' % self.conf.precond_side)
out = os.system(" ".join(command))
assert_(out == 0)
output('reading solution...')
tt = time.clock()
view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
psol = petsc.Vec().load(view_sol)
fd = open(status_filename, 'r')
line = fd.readline().split()
reason = int(line[0])
elapsed = float(line[1])
fd.close()
output('...done in %.2f s' % (time.clock() - tt))
sol = psol[...].copy()
output('%s(%s, %s/proc) convergence: %s (%s)'
% (self.conf.method, self.conf.precond, self.conf.sub_precond,
reason, self.converged_reasons[reason]))
output('elapsed: %.2f [s]' % elapsed)
shutil.rmtree(output_dir)
return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)
        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State
        ScipyDirect.__init__(self, conf, **kwargs)
        equations = self.problem.equations
        aux_state = State(equations.variables)
        # For each block, find the positions of its variables in the
        # *reduced* DOF vector: mark the block DOFs with NaN in the full
        # vector, apply the boundary conditions and look up where the
        # NaNs survived in the reduced vector.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan
            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Assemble the dense matrix blocks and block right-hand sides,
        call the user function to solve them, and scatter the block
        results back into a full-size solution vector.
        """
        mtxi= self.orig_conf.idxs
        # For each block: mtxslc_f are slices into the full vector
        # (contiguous runs of the block's DOF indices), mtxslc_s are the
        # corresponding slices into the block-local vector.
        mtxslc_s = {}
        mtxslc_f = {}
        nn = {}
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                # Find the longest contiguous run starting at ptr.
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1
        # Gather block right-hand sides and dense sub-matrices; keys of
        # mtxs are concatenated block names, e.g. 'uv'.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # The user function fills ress in-place.
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter block results back to the full-size vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # Instance attribute takes precedence over the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        # Map the eliminate/keep lists onto the generic block interface:
        # block '1' is eliminated (A), block '2' is kept (D).
        conf.blocks = {'1': get('eliminate', None,
                                'missing "eliminate" in options!'),
                       '2': get('keep', None,
                                'missing "keep" in options!'),}
        conf.function = SchurComplement.schur_fun
        common = SchurGeneralized.process_conf(conf, kwargs)
        return common

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Solve the block system via the Schur complement; fills `res['1']`
        (eliminated variables u) and `res['2']` (kept variables v) in-place.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # LU factorization of A, reused for all solves with A.
        invA = sls.splu(scs.csc_matrix(mtx['11']))

        # Dense A^{-1} B, built column by column.
        invAB = nm.zeros_like(mtx['12'])
        for j, b in enumerate(mtx['12'].T):
            invAB[:,j] = invA.solve(b)

        # A^{-1} f.
        invAf = invA.solve(rhs['1'])

        # Schur complement system: (D - C A^{-1} B) v = g - C A^{-1} f.
        spC = scs.csc_matrix(mtx['21'])
        k_rhs = rhs['2'] - spC * invAf
        res['2'] = sls.spsolve(scs.csc_matrix(mtx['22'] - spC * invAB), k_rhs)

        # Back-substitution: u = A^{-1} f - A^{-1} B v.
        res['1'] = invAf - nm.dot(invAB, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.cm_pb',
{'others': ['acoustic_subproblem.py'],
'coupling_variables': ['g'],
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(others=get('others', None,
'missing "others" in options!'),
coupling_variables=get('coupling_variables', None,
'missing "coupling_variables"!'),
needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = | Problem.from_conf(confi, init_equations=True) | sfepy.discrete.Problem.from_conf |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    The wrapper resolves ``conf``, ``mtx`` and ``status`` defaults from the
    solver instance, checks that the system is square and compatible with
    the right-hand side (and the initial guess, if given), calls the
    decorated solver and stores the elapsed wall-clock time in
    ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # Wall-clock timing; time.clock() was deprecated (removed in
        # Python 3.8) and measured CPU time on some platforms.
        tt = time.time()

        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and match the right-hand side size.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        ttt = time.time() - tt
        if status is not None:
            status['time'] = ttt

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK if available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module - its import path has
        # moved across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']

        # UMFPACK is optional - probe all known import locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix for repeated solves.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # Instance attribute takes precedence over the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method and delegate everything to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',

                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la
        LinearSolver.__init__(self, conf, **kwargs)
        # Look up the requested solver by name; fall back to cg if the
        # name is not a SciPy iterative solver.
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver
        # SciPy encodes convergence in the sign of the info return value.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        if conf.method == 'qmr':
            # qmr() takes two preconditioners; use the same one for both.
            prec_args = {'M1' : precond, 'M2' : precond}

        else:
            prec_args = {'M' : precond}

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)
        # info > 0: iteration count reached, info < 0: breakdown/bad input.
        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',

                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg'
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    ##
    # c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        # self.mg holds the multigrid hierarchy; built lazily.
        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Look up the requested hierarchy builder; fall back to
        # smoothed_aggregation_solver for unknown names.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the multigrid hierarchy only when the matrix changes.
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Mapping of configuration strings to PETSc PC side constants.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',

                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # Create the KSP object once; solver/preconditioner types are
        # fixed for the lifetime of this instance.
        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Reverse map from PETSc integer codes to reason names, for
        # readable convergence reports.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Convert a SciPy sparse matrix into a PETSc AIJ matrix and create
        compatible solution and right-hand side vectors.
        """
        # Ensure CSR format - createAIJ() expects the CSR triplet.
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()

        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            # Use the given vector as the initial guess instead of zeros.
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',

                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.

                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        # Reuse the serial PETSc solver options, add the parallel ones.
        common = PETScKrylovSolver.process_conf(conf, kwargs)
        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Solve the system by writing the matrix, right-hand side and
        optional initial guess into PETSc binary files in a temporary
        directory, running `sfepy/solvers/petsc_worker.py` under
        `mpiexec`, and reading the solution and convergence status back.
        """
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d
        petsc = self.petsc
        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)
        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)
        # All exchange files live in a fresh temporary directory, removed
        # at the end of the call.
        output_dir = tempfile.mkdtemp()
        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')
        else:
            # An empty name tells the worker there is no initial guess.
            sol0_filename = ''
        prhs[...] = rhs
        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')
        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')
        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)
        output('storing system to %s...' % output_dir)
        tt = time.clock()
        # Serialize the system using PETSc binary viewers so that the
        # worker processes can load it directly.
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))
        # Build the mpiexec command line; solver options are passed as
        # PETSc-style arguments understood by the worker script.
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % self.conf.eps_a,
            '-ksp_rtol %.3e' % self.conf.eps_r,
            '-ksp_max_it %d' % self.conf.i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)
        # NOTE(review): os.system() with a joined string - file names with
        # spaces would break the command; consider subprocess in the future.
        out = os.system(" ".join(command))
        assert_(out == 0)
        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)
        # The worker writes "<reason> <elapsed>" on the first line.
        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))
        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)
        # Clean up the temporary exchange directory.
        shutil.rmtree(output_dir)
        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)
        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State
        ScipyDirect.__init__(self, conf, **kwargs)
        equations = self.problem.equations
        aux_state = State(equations.variables)
        # For each block, find the positions of its variables in the
        # *reduced* DOF vector: mark the block DOFs with NaN in the full
        # vector, apply the boundary conditions and look up where the
        # NaNs survived in the reduced vector.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan
            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        """
        Assemble the dense matrix blocks and block right-hand sides,
        call the user function to solve them, and scatter the block
        results back into a full-size solution vector.
        """
        mtxi= self.orig_conf.idxs
        # For each block: mtxslc_f are slices into the full vector
        # (contiguous runs of the block's DOF indices), mtxslc_s are the
        # corresponding slices into the block-local vector.
        mtxslc_s = {}
        mtxslc_f = {}
        nn = {}
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                # Find the longest contiguous run starting at ptr.
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1
        # Gather block right-hand sides and dense sub-matrices; keys of
        # mtxs are concatenated block names, e.g. 'uv'.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # The user function fills ress in-place.
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter block results back to the full-size vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # Instance attribute takes precedence over the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        # Map the eliminate/keep lists onto the generic block interface:
        # block '1' is eliminated (A), block '2' is kept (D).
        conf.blocks = {'1': get('eliminate', None,
                                'missing "eliminate" in options!'),
                       '2': get('keep', None,
                                'missing "keep" in options!'),}
        conf.function = SchurComplement.schur_fun
        common = SchurGeneralized.process_conf(conf, kwargs)
        return common

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Solve the block system via the Schur complement; fills `res['1']`
        (eliminated variables u) and `res['2']` (kept variables v) in-place.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # LU factorization of A, reused for all solves with A.
        invA = sls.splu(scs.csc_matrix(mtx['11']))

        # Dense A^{-1} B, built column by column.
        invAB = nm.zeros_like(mtx['12'])
        for j, b in enumerate(mtx['12'].T):
            invAB[:,j] = invA.solve(b)

        # A^{-1} f.
        invAf = invA.solve(rhs['1'])

        # Schur complement system: (D - C A^{-1} B) v = g - C A^{-1} f.
        spC = scs.csc_matrix(mtx['21'])
        k_rhs = rhs['2'] - spC * invAf
        res['2'] = sls.spsolve(scs.csc_matrix(mtx['22'] - spC * invAB), k_rhs)

        # Back-substitution: u = A^{-1} f - A^{-1} B v.
        res['1'] = invAf - nm.dot(invAB, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.cm_pb',
{'others': ['acoustic_subproblem.py'],
'coupling_variables': ['g'],
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(others=get('others', None,
'missing "others" in options!'),
coupling_variables=get('coupling_variables', None,
'missing "coupling_variables"!'),
needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = Problem.from_conf(confi, init_equations=True)
sti = | State(pbi.equations.variables) | sfepy.discrete.state.State |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    The wrapper resolves ``conf``, ``mtx`` and ``status`` defaults from the
    solver instance, checks that the system is square and compatible with
    the right-hand side (and the initial guess, if given), calls the
    decorated solver and stores the elapsed wall-clock time in
    ``status['time']``.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        # Wall-clock timing; time.clock() was deprecated (removed in
        # Python 3.8) and measured CPU time on some platforms.
        tt = time.time()

        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The matrix must be square and match the right-hand side size.
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        ttt = time.time() - tt
        if status is not None:
            status['time'] = ttt

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
    """
    Direct sparse solver from SciPy (SuperLU, or UMFPACK if available).
    """
    name = 'ls.scipy_direct'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1100 = {
                'name' : 'dls1100',
                'kind' : 'ls.scipy_direct',

                'method' : 'superlu',
                'presolve' : False,
                'warn' : True,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)
        return Struct(method=get('method', 'auto'),
                      presolve=get('presolve', False),
                      warn=get('warn', True),
                      i_max=None, eps_a=None, eps_r=None) + common

    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, **kwargs)
        um = self.sls = None

        # Locate the sparse direct solver module - its import path has
        # moved across SciPy versions.
        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']

        # UMFPACK is optional - probe all known import locations.
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)

        self.solve = None
        if self._presolve() and hasattr(self, 'mtx'):
            if self.mtx is not None:
                # Prefactorize the matrix for repeated solves.
                self.solve = self.sls.factorized(self.mtx)

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        if self.solve is not None:
            # Matrix is already prefactorized.
            return self.solve(rhs)
        else:
            return self.sls.spsolve(mtx, rhs)

    def _presolve(self):
        # Instance attribute takes precedence over the configuration option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class Umfpack(ScipyDirect):
    """This class stays for compatibility with old input files. Use ScipyDirect
    instead."""
    name = 'ls.umfpack'

    def __init__(self, conf, **kwargs):
        # Force the UMFPACK method, then delegate to ScipyDirect.
        conf.method = 'umfpack'
        ScipyDirect.__init__(self, conf, **kwargs)
##
# c: 22.02.2008
class ScipyIterative( LinearSolver ):
    """
    Interface to SciPy iterative solvers.

    Notes
    -----
    The `eps_r` tolerance is both absolute and relative - the solvers
    stop when either the relative or the absolute residual is below it.

    A preconditioner can be anything that the SciPy solvers accept (sparse
    matrix, dense matrix, LinearOperator).
    """
    name = 'ls.scipy_iterative'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_110 = {
                'name' : 'ls110',
                'kind' : 'ls.scipy_iterative',
                'method' : 'cg',
                'precond' : None,
                'callback' : None,
                'i_max' : 1000,
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', None),
                      callback=get('callback', None),
                      i_max=get('i_max', 100),
                      eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

    def __init__(self, conf, **kwargs):
        import scipy.sparse.linalg.isolve as la

        LinearSolver.__init__(self, conf, **kwargs)

        # Look the configured method up by name; fall back to CG.
        try:
            solver = getattr( la, self.conf.method )
        except AttributeError:
            output( 'scipy solver %s does not exist!' % self.conf.method )
            output( 'using cg instead' )
            solver = la.cg
        self.solver = solver

        # SciPy solvers report convergence via sign(info):
        # 0 = converged, >0 = iteration limit, <0 = breakdown/bad input.
        self.converged_reasons = {
            0 : 'successful exit',
            1 : 'number of iterations',
            -1 : 'illegal input or breakdown',
        }

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        # Call-time keyword arguments override the configured values.
        precond = get_default(kwargs.get('precond', None), self.conf.precond)
        callback = get_default(kwargs.get('callback', None), self.conf.callback)

        # QMR takes two preconditioner arguments (M1, M2); the other
        # solvers take a single M.
        if conf.method == 'qmr':
            prec_args = {'M1' : precond, 'M2' : precond}
        else:
            prec_args = {'M' : precond}

        sol, info = self.solver(mtx, rhs, x0=x0, tol=eps_r, maxiter=i_max,
                                callback=callback, **prec_args)

        output('%s convergence: %s (%s)'
               % (self.conf.method,
                  info, self.converged_reasons[nm.sign(info)]))

        return sol
##
# c: 02.05.2008, r: 02.05.2008
class PyAMGSolver( LinearSolver ):
    """
    Interface to PyAMG solvers.

    Notes
    -----
    Uses relative convergence tolerance, i.e. eps_r is scaled by `||b||`.
    """
    name = 'ls.pyamg'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_102 = {
                'name' : 'ls102',
                'kind' : 'ls.pyamg',
                'method' : 'smoothed_aggregation_solver',
                'accel' : 'cg',
                'eps_r' : 1e-12,
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'smoothed_aggregation_solver'),
                      accel = get('accel', None),
                      i_max=None, eps_a=None,
                      eps_r=get('eps_r', 1e-8)) + common

##
# c: 02.05.2008, r: 02.05.2008
    def __init__( self, conf, **kwargs ):
        try:
            import pyamg
        except ImportError:
            msg = 'cannot import pyamg!'
            raise ImportError( msg )

        LinearSolver.__init__(self, conf, mg=None, **kwargs)

        # Look the hierarchy constructor up by name; fall back to the
        # smoothed aggregation solver.
        try:
            solver = getattr( pyamg, self.conf.method )
        except AttributeError:
            output( 'pyamg.%s does not exist!' % self.conf.method )
            output( 'using pyamg.smoothed_aggregation_solver instead' )
            solver = pyamg.smoothed_aggregation_solver
        self.solver = solver

        if hasattr( self, 'mtx' ):
            if self.mtx is not None:
                # Build the multigrid hierarchy once, up front.
                self.mg = self.solver( self.mtx )

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_r = get_default(eps_r, self.conf.eps_r)

        # Rebuild the hierarchy only when a different matrix is passed in
        # (identity check - rebuilding is expensive).
        if (self.mg is None) or (mtx is not self.mtx):
            self.mg = self.solver(mtx)
            self.mtx = mtx

        sol = self.mg.solve(rhs, x0=x0, accel=conf.accel, tol=eps_r)

        return sol
class PETScKrylovSolver( LinearSolver ):
    """
    PETSc Krylov subspace solver.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc'

    # Map user-facing preconditioning side names to PETSc PCSide values.
    _precond_sides = {None : None, 'left' : 0, 'right' : 1, 'symmetric' : 2}

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_120 = {
                'name' : 'ls120',
                'kind' : 'ls.petsc',
                'method' : 'cg', # ksp_type
                'precond' : 'icc', # pc_type
                'precond_side' : 'left', # ksp_pc_side
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = LinearSolver.process_conf(conf)

        return Struct(method=get('method', 'cg'),
                      precond=get('precond', 'icc'),
                      precond_side=get('precond_side', None),
                      i_max=get('i_max', 100),
                      eps_a=get('eps_a', 1e-8),
                      eps_r=get('eps_r', 1e-8),
                      eps_d=get('eps_d', 1e5)) + common

    def __init__( self, conf, **kwargs ):
        try:
            import petsc4py
            petsc4py.init([])
            from petsc4py import PETSc
        except ImportError:
            msg = 'cannot import petsc4py!'
            raise ImportError( msg )

        # Keep the PETSc module on the instance for later use.
        LinearSolver.__init__(self, conf, petsc=PETSc, pmtx=None, **kwargs)

        # The KSP object (solver + preconditioner types) is created once
        # here; only tolerances and the operator change per call.
        ksp = PETSc.KSP().create()

        ksp.setType( self.conf.method )
        ksp.getPC().setType( self.conf.precond )
        side = self._precond_sides[self.conf.precond_side]
        if side is not None:
            ksp.setPCSide(side)
        self.ksp = ksp

        # Reverse map: integer reason code -> reason name, for reporting.
        self.converged_reasons = {}
        for key, val in ksp.ConvergedReason.__dict__.iteritems():
            if isinstance(val, int):
                self.converged_reasons[val] = key

    def set_matrix( self, mtx ):
        """
        Wrap a SciPy sparse matrix as a PETSc AIJ matrix and create
        matching solution and right-hand side vectors.
        """
        mtx = sps.csr_matrix(mtx)

        pmtx = self.petsc.Mat().createAIJ( mtx.shape,
                                           csr = (mtx.indptr,
                                                  mtx.indices,
                                                  mtx.data) )
        sol, rhs = pmtx.getVecs()
        return pmtx, sol, rhs

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            # Without this flag PETSc would zero the initial guess.
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
class PETScParallelKrylovSolver(PETScKrylovSolver):
    """
    PETSc Krylov subspace solver able to run in parallel by storing the
    system to disk and running a separate script via `mpiexec`.

    The solver and preconditioner types are set upon the solver object
    creation. Tolerances can be overriden when called by passing a `conf`
    object.

    Notes
    -----
    Convergence is reached when `rnorm < max(eps_r * rnorm_0, eps_a)`,
    where, in PETSc, `rnorm` is by default the norm of *preconditioned*
    residual.
    """
    name = 'ls.petsc_parallel'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Missing items are set to default values.

        Example configuration, all items::

            solver_1 = {
                'name' : 'ls',
                'kind' : 'ls.petsc_parallel',
                'log_dir' : '.', # Store logs here.
                'n_proc' : 5, # Number of processes to run.
                'method' : 'cg', # ksp_type
                'precond' : 'bjacobi', # pc_type
                'sub_precond' : 'icc', # sub_pc_type
                'eps_a' : 1e-12, # abstol
                'eps_r' : 1e-12, # rtol
                'eps_d' : 1e5, # divtol
                'i_max' : 1000, # maxits
            }
        """
        get = make_get_conf(conf, kwargs)
        common = PETScKrylovSolver.process_conf(conf, kwargs)

        return Struct(log_dir=get('log_dir', '.'),
                      n_proc=get('n_proc', 1),
                      sub_precond=get('sub_precond', 'icc')) + common

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # All system files live in a fresh temporary directory, removed at
        # the end of a successful run.
        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')
        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        # Serialize the matrix, rhs, and optional initial guess in PETSc
        # binary format for the worker processes.
        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        # Assemble the mpiexec command line for the worker script.
        # NOTE(review): the paths are interpolated unquoted into a shell
        # command - paths containing spaces would break; mkdtemp() paths
        # are normally safe, but log_dir comes from user configuration.
        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable, script_filename,
            '-mtx %s' % mtx_filename, '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename, '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % self.conf.eps_a,
            '-ksp_rtol %.3e' % self.conf.eps_r,
            '-ksp_max_it %d' % self.conf.i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        # Read the solution vector and the worker's status file
        # (reason code and elapsed time on the first line).
        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond, self.conf.sub_precond,
                  reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        # NOTE(review): not in a try/finally - the temporary directory is
        # left behind if anything above raises.
        shutil.rmtree(output_dir)

        return sol
class SchurGeneralized(ScipyDirect):
    r"""
    Generalized Schur complement.

    Defines the matrix blocks and calls user defined function.
    """
    name = 'ls.schur_generalized'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_generalized',
                       {'blocks':
                        {'u': ['displacement1', 'displacement2'],
                         'v': ['velocity1', 'velocity2'],
                         'w': ['pressure1', 'pressure2'],
                         },
                        'function': my_schur,
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)
        common = ScipyDirect.process_conf(conf, kwargs)

        return Struct(blocks=get('blocks', None,
                                 'missing "blocks" in options!'),
                      function=get('function', None,
                                   'missing "function" in options!'),
                      needs_problem_instance=True) + common

    def __init__(self, conf, **kwargs):
        from sfepy.discrete.state import State

        ScipyDirect.__init__(self, conf, **kwargs)

        equations = self.problem.equations
        aux_state = State(equations.variables)

        # For each named block, mark its variables' DOFs with NaN in a full
        # state vector; the NaN markers that survive the EBC reduction
        # identify the block's DOFs in the reduced vector.
        conf.idxs = {}
        for bk, bv in conf.blocks.iteritems():
            aux_state.fill(0.0)
            for jj in bv:
                idx = equations.variables.di.indx[jj]
                aux_state.vec[idx] = nm.nan

            aux_state.apply_ebc()
            vec0 = aux_state.get_reduced()
            conf.idxs[bk] = nm.where(nm.isnan(vec0))[0]

    @standard_call
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):
        mtxi= self.orig_conf.idxs
        mtxslc_s = {}
        mtxslc_f = {}
        nn = {}

        # Consolidate each block's (possibly scattered) DOF indices into
        # runs of consecutive indices, as pairs of slices:
        # mtxslc_s - slices into the block-local vectors/matrices,
        # mtxslc_f - corresponding slices into the full system.
        for ik, iv in mtxi.iteritems():
            ptr = 0
            nn[ik] = len(iv)
            mtxslc_s[ik] = []
            mtxslc_f[ik] = []
            while ptr < nn[ik]:
                idx0 = iv[ptr:]
                idxrange = nm.arange(idx0[0], idx0[0] + len(idx0))
                aux = nm.where(idx0 == idxrange)[0]
                mtxslc_s[ik].append(slice(ptr + aux[0], ptr + aux[-1] + 1))
                mtxslc_f[ik].append(slice(idx0[aux][0], idx0[aux][-1] + 1))
                ptr += aux[-1] + 1

        # Extract the dense sub-matrices and sub-vectors for every pair of
        # blocks ('11', '12', ...), keyed by concatenated block names.
        mtxs = {}
        rhss = {}
        ress = {}
        for ir in mtxi.iterkeys():
            rhss[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            ress[ir] = nm.zeros((nn[ir],), dtype=nm.float64)
            for jr, idxr in enumerate(mtxslc_f[ir]):
                rhss[ir][mtxslc_s[ir][jr]] = rhs[idxr]

            for ic in mtxi.iterkeys():
                mtxid = '%s%s' % (ir, ic)
                mtxs[mtxid] = nm.zeros((nn[ir], nn[ic]), dtype=nm.float64)
                for jr, idxr in enumerate(mtxslc_f[ir]):
                    for jc, idxc in enumerate(mtxslc_f[ic]):
                        iir = mtxslc_s[ir][jr]
                        iic = mtxslc_s[ic][jc]
                        mtxs[mtxid][iir, iic] = mtx._get_submatrix(idxr, idxc).todense()

        # The user-defined function fills `ress` in place.
        self.orig_conf.function(ress, mtxs, rhss, nn)

        # Scatter the block results back into a full-sized solution vector.
        res = nm.zeros_like(rhs)
        for ir in mtxi.iterkeys():
            for jr, idxr in enumerate(mtxslc_f[ir]):
                res[idxr] = ress[ir][mtxslc_s[ir][jr]]

        return res

    def _presolve(self):
        # An instance attribute, if present, overrides the config option.
        if hasattr(self, 'presolve'):
            return self.presolve
        else:
            return self.conf.presolve
class SchurComplement(SchurGeneralized):
    r"""
    Schur complement.

    Solution of the linear system

    .. math::
       \left[ \begin{array}{cc}
       A & B \\
       C & D \end{array} \right]
       \cdot
       \left[ \begin{array}{c}
       u \\
       v \end{array} \right]
       =
       \left[ \begin{array}{c}
       f \\
       g \end{array} \right]

    is obtained by solving the following equation:

    .. math::
       (D - C A^{-1} B) \cdot v = g - C A^{-1} f

    variable(s) :math:`u` are specified in "eliminate" list,
    variable(s) :math:`v` are specified in "keep" list,

    See: http://en.wikipedia.org/wiki/Schur_complement
    """
    name = 'ls.schur_complement'

    @staticmethod
    def process_conf(conf, kwargs):
        """
        Setup solver configuration options.

        Example configuration::

            solvers = {
                'ls': ('ls.schur_complement',
                       {'eliminate': ['displacement'],
                        'keep': ['pressure'],
                        'needs_problem_instance': True,
                        })
            }
        """
        get = make_get_conf(conf, kwargs)

        # Translate eliminate/keep into the generic two-block setup of
        # SchurGeneralized: block '1' = u (eliminated), block '2' = v (kept).
        conf.blocks = {'1': get('eliminate', None,
                                'missing "eliminate" in options!'),
                       '2': get('keep', None,
                                'missing "keep" in options!'),}
        conf.function = SchurComplement.schur_fun
        common = SchurGeneralized.process_conf(conf, kwargs)

        return common

    @staticmethod
    def schur_fun(res, mtx, rhs, nn):
        """
        Solve the block system via the Schur complement of block A
        (= mtx['11']); fills res['1'] (u) and res['2'] (v) in place.
        """
        import scipy.sparse as scs
        import scipy.sparse.linalg as sls

        # Factor A once; reused for A^{-1} B (column by column) and A^{-1} f.
        invA = sls.splu(scs.csc_matrix(mtx['11']))

        invAB = nm.zeros_like(mtx['12'])
        for j, b in enumerate(mtx['12'].T):
            invAB[:,j] = invA.solve(b)

        invAf = invA.solve(rhs['1'])

        spC = scs.csc_matrix(mtx['21'])
        # Solve (D - C A^{-1} B) v = g - C A^{-1} f, then back-substitute
        # u = A^{-1} f - A^{-1} B v.
        k_rhs = rhs['2'] - spC * invAf
        res['2'] = sls.spsolve(scs.csc_matrix(mtx['22'] - spC * invAB), k_rhs)
        res['1'] = invAf - nm.dot(invAB, res['2'])
class MultiProblem(ScipyDirect):
r"""
Conjugate multiple problems.
Allows to define conjugate multiple problems.
"""
name = 'ls.cm_pb'
@staticmethod
def process_conf(conf, kwargs):
"""
Setup solver configuration options.
Example configuration::
solvers = {
'ls': ('ls.cm_pb',
{'others': ['acoustic_subproblem.py'],
'coupling_variables': ['g'],
'needs_problem_instance': True,
})
}
"""
get = make_get_conf(conf, kwargs)
common = ScipyDirect.process_conf(conf, kwargs)
return Struct(others=get('others', None,
'missing "others" in options!'),
coupling_variables=get('coupling_variables', None,
'missing "coupling_variables"!'),
needs_problem_instance=True) + common
def __init__(self, conf, problem, **kwargs):
from sfepy.discrete.state import State
from sfepy.discrete import Problem
from sfepy.base.conf import ProblemConf, get_standard_keywords
from scipy.spatial import cKDTree as KDTree
ScipyDirect.__init__(self, conf, **kwargs)
# init subproblems
pb_vars = problem.get_variables()
# get "master" DofInfo and last index
pb_adi_indx = problem.equations.variables.adi.indx
self.adi_indx = pb_adi_indx.copy()
last_indx = -1
for ii in self.adi_indx.itervalues():
last_indx = nm.max([last_indx, ii.stop])
# coupling variables
self.cvars_to_pb = {}
for jj in conf.coupling_variables:
self.cvars_to_pb[jj] = [None, None]
if jj in pb_vars.names:
if pb_vars[jj].dual_var_name is not None:
self.cvars_to_pb[jj][0] = -1
else:
self.cvars_to_pb[jj][1] = -1
# init subproblems
self.subpb = []
required, other = get_standard_keywords()
master_prefix = output.get_output_prefix()
for ii, ifname in enumerate(conf.others):
sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
output.set_output_prefix(sub_prefix)
kwargs['master_problem'] = problem
confi = ProblemConf.from_file(ifname, required, other,
define_args=kwargs)
pbi = Problem.from_conf(confi, init_equations=True)
sti = State(pbi.equations.variables)
pbi.equations.set_data(None, ignore_unknown=True)
pbi.time_update()
pbi.update_materials()
sti.apply_ebc()
pbi_vars = pbi.get_variables()
| output.set_output_prefix(master_prefix) | sfepy.base.base.output.set_output_prefix |
import time
import numpy as nm
import warnings
import scipy.sparse as sps
warnings.simplefilter('ignore', sps.SparseEfficiencyWarning)
from sfepy.base.base import output, get_default, assert_, try_imports, Struct
from sfepy.solvers.solvers import make_get_conf, LinearSolver
def standard_call(call):
    """
    Decorator handling argument preparation and timing for linear solvers.

    Fills in missing ``conf``, ``mtx`` and ``status`` from the solver
    instance, sanity-checks the system shapes, invokes the wrapped solver
    and records the elapsed wall time in ``status['time']`` when a status
    dict is given.
    """
    def _standard_call(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                       i_max=None, mtx=None, status=None, **kwargs):
        t_start = time.clock()

        # Fall back to the instance's own configuration/matrix/status.
        conf = get_default(conf, self.conf)
        mtx = get_default(mtx, self.mtx)
        status = get_default(status, self.status)

        # The system must be square and consistent with the rhs (and x0).
        assert_(mtx.shape[0] == mtx.shape[1] == rhs.shape[0])
        if x0 is not None:
            assert_(x0.shape[0] == rhs.shape[0])

        result = call(self, rhs, x0, conf, eps_a, eps_r, i_max, mtx, status,
                      **kwargs)

        elapsed = time.clock() - t_start
        if status is not None:
            status['time'] = elapsed

        return result

    return _standard_call
class ScipyDirect(LinearSolver):
name = 'ls.scipy_direct'
@staticmethod
def process_conf(conf, kwargs):
"""
Missing items are set to default values.
Example configuration, all items::
solver_1100 = {
'name' : 'dls1100',
'kind' : 'ls.scipy_direct',
'method' : 'superlu',
'presolve' : False,
'warn' : True,
}
"""
get = make_get_conf(conf, kwargs)
common = LinearSolver.process_conf(conf)
return Struct(method=get('method', 'auto'),
presolve=get('presolve', False),
warn=get('warn', True),
i_max=None, eps_a=None, eps_r=None) + common
def __init__(self, conf, **kwargs):
LinearSolver.__init__(self, conf, **kwargs)
um = self.sls = None
aux = try_imports(['import scipy.linsolve as sls',
'import scipy.splinalg.dsolve as sls',
'import scipy.sparse.linalg.dsolve as sls'],
'cannot import scipy sparse direct solvers!')
self.sls = aux['sls']
aux = try_imports(['import scipy.linsolve.umfpack as um',
'import scipy.splinalg.dsolve.umfpack as um',
'import scipy.sparse.linalg.dsolve.umfpack as um',
'import scikits.umfpack as um'])
if 'um' in aux:
um = aux['um']
if um is not None:
is_umfpack = hasattr(um, 'UMFPACK_OK')
else:
is_umfpack = False
method = self.conf.method
if method == 'superlu':
self.sls.use_solver(useUmfpack=False)
elif method == 'umfpack':
if not is_umfpack and self.conf.warn:
| output('umfpack not available, using superlu!') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = | get_standard_keywords() | sfepy.base.conf.get_standard_keywords |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in | locate_files('*.py', pdf_dir) | sfepy.base.ioutils.locate_files |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
| output('...done') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init( | term_table.keys() | sfepy.terms.term_table.keys |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
| output('trying "%s"...' % base) | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
| output('...ok') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
output('...done')
if options.unused:
| output('unused terms:') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
output('...done')
if options.unused:
output('unused terms:')
unused = [name for name in terms_use.keys()
if len(terms_use[name]) == 0]
for name in sorted(unused):
output(' ' + name)
output('total: %d' % len(unused))
else:
| output('terms use:') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
output('...done')
if options.unused:
output('unused terms:')
unused = [name for name in terms_use.keys()
if len(terms_use[name]) == 0]
for name in sorted(unused):
output(' ' + name)
output('total: %d' % len(unused))
else:
output('terms use:')
for name, ex_names in | ordered_iteritems(terms_use) | sfepy.base.base.ordered_iteritems |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = | parse_definition(eq_conf) | sfepy.discrete.equations.parse_definition |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
output('...done')
if options.unused:
output('unused terms:')
unused = [name for name in terms_use.keys()
if len(terms_use[name]) == 0]
for name in sorted(unused):
| output(' ' + name) | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
| output('...failed') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Show terms use in problem description files in the given directory.
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
from argparse import ArgumentParser
from sfepy.base.base import output, dict_from_keys_init, ordered_iteritems
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.base.ioutils import locate_files
from sfepy.discrete.equations import parse_definition
from sfepy.terms import term_table
helps = {
'counts' : 'show terms use counts only',
'unused' : 'show unused terms only',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-c', '--counts',
action='store_true', dest='counts',
default=False, help=helps['counts'])
parser.add_argument('-u', '--unused',
action='store_true', dest='unused',
default=False, help=helps['unused'])
parser.add_argument('directory')
options = parser.parse_args()
pdf_dir = os.path.realpath(options.directory)
required, other = get_standard_keywords()
terms_use = dict_from_keys_init(term_table.keys(), set)
for filename in locate_files('*.py', pdf_dir):
base = filename.replace(pdf_dir, '').lstrip(os.path.sep)
output('trying "%s"...' % base)
try:
conf = ProblemConf.from_file(filename, required, other,
verbose=False)
except:
output('...failed')
continue
use = conf.options.get('use_equations', 'equations')
eqs_conf = getattr(conf, use)
for key, eq_conf in six.iteritems(eqs_conf):
term_descs = parse_definition(eq_conf)
for td in term_descs:
terms_use[td.name].add(base)
output('...ok')
output('...done')
if options.unused:
output('unused terms:')
unused = [name for name in terms_use.keys()
if len(terms_use[name]) == 0]
for name in sorted(unused):
output(' ' + name)
output('total: %d' % len(unused))
else:
output('terms use:')
for name, ex_names in ordered_iteritems(terms_use):
output('%s: %d' % (name, len(ex_names)))
if not options.counts:
for ex_name in sorted(ex_names):
| output(' ' + ex_name) | sfepy.base.base.output |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = | LineProbe(p0, p1, n_point, share_geometry=True) | sfepy.discrete.probes.LineProbe |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = | CircleProbe(centre, normal, r, n_point, share_geometry=True) | sfepy.discrete.probes.CircleProbe |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
circle.set_options(close_limit=0.0)
probes = [line, circle]
labels = ['%s -> %s' % (p0, p1),
'circle(%s, %s, %s' % (centre, normal, r)]
return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(T)
results['T'] = (pars, vals)
pars, vals = probe(dvel)
results['dvel'] = (pars, vals)
fig = plt.figure(1)
ax = plt.subplot(2, 2, 2 * ax_num + 1)
ax.cla()
pars, vals = results['T']
ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('temperature')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
ax = plt.subplot(2, 2, 2 * ax_num + 2)
ax.cla()
pars, vals = results['dvel']
for ic in range(vals.shape[1]):
ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('diffusion velocity')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
return fig, results
helps = {
'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
'ic_max' : 'the max. initial condition value [default: %(default)s]',
'order' : 'temperature field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
| output('using values:') | sfepy.base.base.output |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
circle.set_options(close_limit=0.0)
probes = [line, circle]
labels = ['%s -> %s' % (p0, p1),
'circle(%s, %s, %s' % (centre, normal, r)]
return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(T)
results['T'] = (pars, vals)
pars, vals = probe(dvel)
results['dvel'] = (pars, vals)
fig = plt.figure(1)
ax = plt.subplot(2, 2, 2 * ax_num + 1)
ax.cla()
pars, vals = results['T']
ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('temperature')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
ax = plt.subplot(2, 2, 2 * ax_num + 2)
ax.cla()
pars, vals = results['dvel']
for ic in range(vals.shape[1]):
ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('diffusion velocity')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
return fig, results
helps = {
'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
'ic_max' : 'the max. initial condition value [default: %(default)s]',
'order' : 'temperature field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
output('using values:')
| output(' diffusivity:', options.diffusivity) | sfepy.base.base.output |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
circle.set_options(close_limit=0.0)
probes = [line, circle]
labels = ['%s -> %s' % (p0, p1),
'circle(%s, %s, %s' % (centre, normal, r)]
return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(T)
results['T'] = (pars, vals)
pars, vals = probe(dvel)
results['dvel'] = (pars, vals)
fig = plt.figure(1)
ax = plt.subplot(2, 2, 2 * ax_num + 1)
ax.cla()
pars, vals = results['T']
ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('temperature')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
ax = plt.subplot(2, 2, 2 * ax_num + 2)
ax.cla()
pars, vals = results['dvel']
for ic in range(vals.shape[1]):
ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('diffusion velocity')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
return fig, results
helps = {
'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
'ic_max' : 'the max. initial condition value [default: %(default)s]',
'order' : 'temperature field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
output('using values:')
output(' diffusivity:', options.diffusivity)
| output(' max. IC value:', options.ic_max) | sfepy.base.base.output |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
circle.set_options(close_limit=0.0)
probes = [line, circle]
labels = ['%s -> %s' % (p0, p1),
'circle(%s, %s, %s' % (centre, normal, r)]
return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(T)
results['T'] = (pars, vals)
pars, vals = probe(dvel)
results['dvel'] = (pars, vals)
fig = plt.figure(1)
ax = plt.subplot(2, 2, 2 * ax_num + 1)
ax.cla()
pars, vals = results['T']
ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('temperature')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
ax = plt.subplot(2, 2, 2 * ax_num + 2)
ax.cla()
pars, vals = results['dvel']
for ic in range(vals.shape[1]):
ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('diffusion velocity')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
return fig, results
helps = {
'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
'ic_max' : 'the max. initial condition value [default: %(default)s]',
'order' : 'temperature field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
output('using values:')
output(' diffusivity:', options.diffusivity)
output(' max. IC value:', options.ic_max)
| output('uniform mesh refinement level:', options.refine) | sfepy.base.base.output |
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity with the given probe and
    plot the probed values into figure 1.

    Parameters
    ----------
    ax_num : int
        The subplot row index; row `ax_num` gets the temperature plot on
        the left and the diffusion velocity plot on the right.
    T, dvel
        The temperature and diffusion velocity results to probe.
    probe : Probe
        The probe to apply to both results.
    label : str
        The probe description shown in the x-axis labels.

    Returns
    -------
    fig : Figure
        The matplotlib figure that was drawn into.
    results : dict
        The probed data: `(pars, vals)` pairs keyed by 'T' and 'dvel'.
    """
    # Probe both results first; the dict literal preserves the call order.
    results = {'T' : probe(T), 'dvel' : probe(dvel)}

    def decorate(ax, pars, ylabel):
        # Common axis decorations: a 5% x-margin, labels and a legend.
        dx = 0.05 * (pars[-1] - pars[0])
        ax.set_xlim(pars[0] - dx, pars[-1] + dx)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('probe %s' % label, fontsize=8)
        ax.legend(loc='best', fontsize=10)

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    pars, vals = results['T']
    ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'temperature')

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    pars, vals = results['dvel']
    for ic in range(vals.shape[1]):
        ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
                lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'diffusion velocity')

    return fig, results
# Command line help strings, keyed by option name.
helps = {
    'diffusivity': 'the diffusivity coefficient [default: %(default)s]',
    'ic_max': 'the max. initial condition value [default: %(default)s]',
    'order': 'temperature field approximation order [default: %(default)s]',
    'refine': 'uniform mesh refinement level [default: %(default)s]',
    'probe': 'probe the results',
    'show': 'show the probing results figure, if --probe is used',
}
def main():
    """
    Run the transient Laplace (heat) equation example interactively.

    Command line options select the diffusivity coefficient, the max.
    initial condition value, the temperature approximation order, the
    uniform mesh refinement level, and whether to probe/show the results.

    NOTE(review): this definition appears truncated here - the upstream
    example continues with the full problem setup and time stepping.
    """
    # Local import: data_dir locates the bundled example meshes.
    from sfepy import data_dir
    # Define and parse the command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # The approximation order must be positive for a temperature field.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    # Report the effective parameter values.
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the computational mesh of a cylinder.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity with the given probe and
    plot the probed values into figure 1.

    Parameters
    ----------
    ax_num : int
        The subplot row index; row `ax_num` gets the temperature plot on
        the left and the diffusion velocity plot on the right.
    T, dvel
        The temperature and diffusion velocity results to probe.
    probe : Probe
        The probe to apply to both results.
    label : str
        The probe description shown in the x-axis labels.

    Returns
    -------
    fig : Figure
        The matplotlib figure that was drawn into.
    results : dict
        The probed data: `(pars, vals)` pairs keyed by 'T' and 'dvel'.
    """
    # Probe both results first; the dict literal preserves the call order.
    results = {'T' : probe(T), 'dvel' : probe(dvel)}

    def decorate(ax, pars, ylabel):
        # Common axis decorations: a 5% x-margin, labels and a legend.
        dx = 0.05 * (pars[-1] - pars[0])
        ax.set_xlim(pars[0] - dx, pars[-1] + dx)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('probe %s' % label, fontsize=8)
        ax.legend(loc='best', fontsize=10)

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    pars, vals = results['T']
    ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'temperature')

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    pars, vals = results['dvel']
    for ic in range(vals.shape[1]):
        ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
                lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'diffusion velocity')

    return fig, results
# Command line help strings, keyed by option name.
helps = {
    'diffusivity': 'the diffusivity coefficient [default: %(default)s]',
    'ic_max': 'the max. initial condition value [default: %(default)s]',
    'order': 'temperature field approximation order [default: %(default)s]',
    'refine': 'uniform mesh refinement level [default: %(default)s]',
    'probe': 'probe the results',
    'show': 'show the probing results figure, if --probe is used',
}
def main():
    """
    Run the transient Laplace (heat) equation example interactively.

    Command line options select the diffusivity coefficient, the max.
    initial condition value, the temperature approximation order, the
    uniform mesh refinement level, and whether to probe/show the results.

    NOTE(review): this definition appears truncated here - the upstream
    example continues with the full problem setup and time stepping.
    """
    # Local import: data_dir locates the bundled example meshes.
    from sfepy import data_dir
    # Define and parse the command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # The approximation order must be positive for a temperature field.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    # Report the effective parameter values.
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the computational mesh of a cylinder.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    # Create the finite element domain on the mesh.
    domain = FEDomain('domain', mesh)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity with the given probe and
    plot the probed values into figure 1.

    Parameters
    ----------
    ax_num : int
        The subplot row index; row `ax_num` gets the temperature plot on
        the left and the diffusion velocity plot on the right.
    T, dvel
        The temperature and diffusion velocity results to probe.
    probe : Probe
        The probe to apply to both results.
    label : str
        The probe description shown in the x-axis labels.

    Returns
    -------
    fig : Figure
        The matplotlib figure that was drawn into.
    results : dict
        The probed data: `(pars, vals)` pairs keyed by 'T' and 'dvel'.
    """
    # Probe both results first; the dict literal preserves the call order.
    results = {'T' : probe(T), 'dvel' : probe(dvel)}

    def decorate(ax, pars, ylabel):
        # Common axis decorations: a 5% x-margin, labels and a legend.
        dx = 0.05 * (pars[-1] - pars[0])
        ax.set_xlim(pars[0] - dx, pars[-1] + dx)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('probe %s' % label, fontsize=8)
        ax.legend(loc='best', fontsize=10)

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    pars, vals = results['T']
    ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'temperature')

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    pars, vals = results['dvel']
    for ic in range(vals.shape[1]):
        ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
                lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'diffusion velocity')

    return fig, results
# Command line help strings, keyed by option name.
helps = {
    'diffusivity': 'the diffusivity coefficient [default: %(default)s]',
    'ic_max': 'the max. initial condition value [default: %(default)s]',
    'order': 'temperature field approximation order [default: %(default)s]',
    'refine': 'uniform mesh refinement level [default: %(default)s]',
    'probe': 'probe the results',
    'show': 'show the probing results figure, if --probe is used',
}
def main():
    """
    Run the transient Laplace (heat) equation example interactively.

    Command line options select the diffusivity coefficient, the max.
    initial condition value, the temperature approximation order, the
    uniform mesh refinement level, and whether to probe/show the results.

    NOTE(review): this definition appears truncated here - the upstream
    example continues with the full problem setup and time stepping.
    """
    # Local import: data_dir locates the bundled example meshes.
    from sfepy import data_dir
    # Define and parse the command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # The approximation order must be positive for a temperature field.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    # Report the effective parameter values.
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the computational mesh of a cylinder.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    # Create the finite element domain on the mesh.
    domain = FEDomain('domain', mesh)
    # Optionally refine the mesh uniformly, reporting mesh sizes.
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))
    # Define the whole-domain region and the two end-face regions.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')
    # Scalar temperature field and the unknown variable (with history
    # for the time derivative).
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity with the given probe and
    plot the probed values into figure 1.

    Parameters
    ----------
    ax_num : int
        The subplot row index; row `ax_num` gets the temperature plot on
        the left and the diffusion velocity plot on the right.
    T, dvel
        The temperature and diffusion velocity results to probe.
    probe : Probe
        The probe to apply to both results.
    label : str
        The probe description shown in the x-axis labels.

    Returns
    -------
    fig : Figure
        The matplotlib figure that was drawn into.
    results : dict
        The probed data: `(pars, vals)` pairs keyed by 'T' and 'dvel'.
    """
    # Probe both results first; the dict literal preserves the call order.
    results = {'T' : probe(T), 'dvel' : probe(dvel)}

    def decorate(ax, pars, ylabel):
        # Common axis decorations: a 5% x-margin, labels and a legend.
        dx = 0.05 * (pars[-1] - pars[0])
        ax.set_xlim(pars[0] - dx, pars[-1] + dx)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('probe %s' % label, fontsize=8)
        ax.legend(loc='best', fontsize=10)

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    pars, vals = results['T']
    ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'temperature')

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    pars, vals = results['dvel']
    for ic in range(vals.shape[1]):
        ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
                lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'diffusion velocity')

    return fig, results
# Command line help strings, keyed by option name.
helps = {
    'diffusivity': 'the diffusivity coefficient [default: %(default)s]',
    'ic_max': 'the max. initial condition value [default: %(default)s]',
    'order': 'temperature field approximation order [default: %(default)s]',
    'refine': 'uniform mesh refinement level [default: %(default)s]',
    'probe': 'probe the results',
    'show': 'show the probing results figure, if --probe is used',
}
def main():
    """
    Run the transient Laplace (heat) equation example interactively.

    Command line options select the diffusivity coefficient, the max.
    initial condition value, the temperature approximation order, the
    uniform mesh refinement level, and whether to probe/show the results.

    NOTE(review): this definition appears truncated here - the upstream
    example continues with the full problem setup and time stepping.
    """
    # Local import: data_dir locates the bundled example meshes.
    from sfepy import data_dir
    # Define and parse the command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # The approximation order must be positive for a temperature field.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    # Report the effective parameter values.
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the computational mesh of a cylinder.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    # Create the finite element domain on the mesh.
    domain = FEDomain('domain', mesh)
    # Optionally refine the mesh uniformly, reporting mesh sizes.
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))
    # Define the whole-domain region and the two end-face regions.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')
    # Scalar temperature field; the unknown keeps one history step for
    # the time derivative, the test variable is its dual.
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity with the given probe and
    plot the probed values into figure 1.

    Parameters
    ----------
    ax_num : int
        The subplot row index; row `ax_num` gets the temperature plot on
        the left and the diffusion velocity plot on the right.
    T, dvel
        The temperature and diffusion velocity results to probe.
    probe : Probe
        The probe to apply to both results.
    label : str
        The probe description shown in the x-axis labels.

    Returns
    -------
    fig : Figure
        The matplotlib figure that was drawn into.
    results : dict
        The probed data: `(pars, vals)` pairs keyed by 'T' and 'dvel'.
    """
    # Probe both results first; the dict literal preserves the call order.
    results = {'T' : probe(T), 'dvel' : probe(dvel)}

    def decorate(ax, pars, ylabel):
        # Common axis decorations: a 5% x-margin, labels and a legend.
        dx = 0.05 * (pars[-1] - pars[0])
        ax.set_xlim(pars[0] - dx, pars[-1] + dx)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('probe %s' % label, fontsize=8)
        ax.legend(loc='best', fontsize=10)

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    pars, vals = results['T']
    ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'temperature')

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    pars, vals = results['dvel']
    for ic in range(vals.shape[1]):
        ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
                lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'diffusion velocity')

    return fig, results
# Command line help strings, keyed by option name.
helps = {
    'diffusivity': 'the diffusivity coefficient [default: %(default)s]',
    'ic_max': 'the max. initial condition value [default: %(default)s]',
    'order': 'temperature field approximation order [default: %(default)s]',
    'refine': 'uniform mesh refinement level [default: %(default)s]',
    'probe': 'probe the results',
    'show': 'show the probing results figure, if --probe is used',
}
def main():
    """
    Run the transient Laplace (heat) equation example interactively.

    Command line options select the diffusivity coefficient, the max.
    initial condition value, the temperature approximation order, the
    uniform mesh refinement level, and whether to probe/show the results.

    NOTE(review): this definition appears truncated here - the upstream
    example continues with the full problem setup and time stepping.
    """
    # Local import: data_dir locates the bundled example meshes.
    from sfepy import data_dir
    # Define and parse the command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # The approximation order must be positive for a temperature field.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    # Report the effective parameter values.
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the computational mesh of a cylinder.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    # Create the finite element domain on the mesh.
    domain = FEDomain('domain', mesh)
    # Optionally refine the mesh uniformly, reporting mesh sizes.
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))
    # Define the whole-domain region and the two end-face regions.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')
    # Scalar temperature field; the unknown keeps one history step for
    # the time derivative, the test variable is its dual.
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')
    # Isotropic diffusivity material and a quadrature of sufficient order.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))
    integral = Integral('i', order=2*options.order)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity with the given probe and
    plot the probed values into figure 1.

    Parameters
    ----------
    ax_num : int
        The subplot row index; row `ax_num` gets the temperature plot on
        the left and the diffusion velocity plot on the right.
    T, dvel
        The temperature and diffusion velocity results to probe.
    probe : Probe
        The probe to apply to both results.
    label : str
        The probe description shown in the x-axis labels.

    Returns
    -------
    fig : Figure
        The matplotlib figure that was drawn into.
    results : dict
        The probed data: `(pars, vals)` pairs keyed by 'T' and 'dvel'.
    """
    # Probe both results first; the dict literal preserves the call order.
    results = {'T' : probe(T), 'dvel' : probe(dvel)}

    def decorate(ax, pars, ylabel):
        # Common axis decorations: a 5% x-margin, labels and a legend.
        dx = 0.05 * (pars[-1] - pars[0])
        ax.set_xlim(pars[0] - dx, pars[-1] + dx)
        ax.set_ylabel(ylabel)
        ax.set_xlabel('probe %s' % label, fontsize=8)
        ax.legend(loc='best', fontsize=10)

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    pars, vals = results['T']
    ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'temperature')

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    pars, vals = results['dvel']
    for ic in range(vals.shape[1]):
        ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
                lw=1, ls='-', marker='+', ms=3)
    decorate(ax, pars, 'diffusion velocity')

    return fig, results
# Command line help strings, keyed by option name.
helps = {
    'diffusivity': 'the diffusivity coefficient [default: %(default)s]',
    'ic_max': 'the max. initial condition value [default: %(default)s]',
    'order': 'temperature field approximation order [default: %(default)s]',
    'refine': 'uniform mesh refinement level [default: %(default)s]',
    'probe': 'probe the results',
    'show': 'show the probing results figure, if --probe is used',
}
def main():
    """
    Run the transient Laplace (heat) equation example interactively.

    Command line options select the diffusivity coefficient, the max.
    initial condition value, the temperature approximation order, the
    uniform mesh refinement level, and whether to probe/show the results.

    NOTE(review): this definition appears truncated here - the upstream
    example continues with the full problem setup and time stepping.
    """
    # Local import: data_dir locates the bundled example meshes.
    from sfepy import data_dir
    # Define and parse the command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    # The approximation order must be positive for a temperature field.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    # Report the effective parameter values.
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the computational mesh of a cylinder.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    # Create the finite element domain on the mesh.
    domain = FEDomain('domain', mesh)
    # Optionally refine the mesh uniformly, reporting mesh sizes.
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))
    # Define the whole-domain region and the two end-face regions.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')
    # Scalar temperature field; the unknown keeps one history step for
    # the time derivative, the test variable is its dual.
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')
    # Isotropic diffusivity material and a quadrature of sufficient order.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))
    integral = Integral('i', order=2*options.order)
    # Weak form terms: diffusion and the dT/dt mass term; their sum is
    # the balance equation.
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the results.

    Parameters
    ----------
    problem : Problem
        The problem instance. Currently unused; kept for interface
        compatibility with the caller.

    Returns
    -------
    probes : list
        The line probe and the circle probe.
    labels : list of str
        Human-readable descriptions of the probes.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    # The circle probe is centred in the middle of the line probe.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Bug fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity along the given probe and
    plot the probed values into two subplots of figure 1.

    Returns the figure and a dict holding the probed (parameters, values)
    pairs under the keys 'T' and 'dvel'.
    """
    # Each probe call yields the probe parametrization and the values.
    results = {
        'T' : probe(T),
        'dvel' : probe(dvel),
    }

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    ppars, pvals = results['T']
    ax.plot(ppars, pvals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    # Pad the x-range by 5% of the probe length on both sides.
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    ppars, pvals = results['dvel']
    for icomp in range(pvals.shape[1]):
        ax.plot(ppars, pvals[:, icomp], label=r'$w_{%d}$' % (icomp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    return fig, results
# Help strings for the command line options defined in main().
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
    """
    Parse the command line options and set up the transient heat
    conduction problem on a cylinder mesh.

    NOTE(review): this copy of main() is truncated here by a following
    duplicated copy of the file contents; the boundary/initial conditions
    and the solver set-up are missing at this point.
    """
    from sfepy import data_dir

    # Command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()

    # At least linear elements are required.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')

    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)

    # Load the mesh and optionally refine it uniformly.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    domain = FEDomain('domain', mesh)
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

    # Whole domain and the two end faces of the cylinder.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')

    # Scalar temperature field; history=1 makes the previous time step
    # state available (used by the dT/dt term below).
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')

    # Isotropic diffusivity tensor.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))

    # Weak form: diffusion term + time derivative term.
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for probing the results.

    Parameters
    ----------
    problem : Problem
        The problem instance (unused here; kept for API compatibility
        with probing drivers that pass it).

    Returns
    -------
    probes : list
        The line and the circle probe instances.
    labels : list of str
        Descriptive labels matching `probes`.
    """
    # Use enough points for higher order approximations.
    n_point = 1000

    # Line probe along the x-axis of the cylinder mesh.
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)

    # Circle probe centred at the line midpoint, in the plane given by
    # the normal vector.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)

    probes = [line, circle]
    # Fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]

    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity along the given probe and
    plot the probed values into two subplots of figure 1.

    Returns the figure and a dict holding the probed (parameters, values)
    pairs under the keys 'T' and 'dvel'.
    """
    # Each probe call yields the probe parametrization and the values.
    results = {
        'T' : probe(T),
        'dvel' : probe(dvel),
    }

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    ppars, pvals = results['T']
    ax.plot(ppars, pvals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    # Pad the x-range by 5% of the probe length on both sides.
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    ppars, pvals = results['dvel']
    for icomp in range(pvals.shape[1]):
        ax.plot(ppars, pvals[:, icomp], label=r'$w_{%d}$' % (icomp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    return fig, results
# Help strings for the command line options defined in main().
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
    """
    Parse the command line options and set up the transient heat
    conduction problem on a cylinder mesh, with Dirichlet boundary
    conditions and a function-given initial condition.

    NOTE(review): this copy of main() is truncated here by a following
    duplicated copy of the file contents; the solver set-up and the
    solution are missing at this point.
    """
    from sfepy import data_dir

    # Command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()

    # At least linear elements are required.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')

    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)

    # Load the mesh and optionally refine it uniformly.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    domain = FEDomain('domain', mesh)
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

    # Whole domain and the two end faces of the cylinder.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')

    # Scalar temperature field; history=1 makes the previous time step
    # state available (used by the dT/dt term below).
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')

    # Isotropic diffusivity tensor.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))

    # Weak form: diffusion term + time derivative term.
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])

    # Dirichlet boundary conditions on the cylinder end faces.
    ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
    ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})

    # Initial conditions given as a function of the coordinates.
    def get_ic(coors, ic):
        x, y, z = coors.T
        return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
    ic_fun = Function('ic_fun', get_ic)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for probing the results.

    Parameters
    ----------
    problem : Problem
        The problem instance (unused here; kept for API compatibility
        with probing drivers that pass it).

    Returns
    -------
    probes : list
        The line and the circle probe instances.
    labels : list of str
        Descriptive labels matching `probes`.
    """
    # Use enough points for higher order approximations.
    n_point = 1000

    # Line probe along the x-axis of the cylinder mesh.
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)

    # Circle probe centred at the line midpoint, in the plane given by
    # the normal vector.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)

    probes = [line, circle]
    # Fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]

    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity along the given probe and
    plot the probed values into two subplots of figure 1.

    Returns the figure and a dict holding the probed (parameters, values)
    pairs under the keys 'T' and 'dvel'.
    """
    # Each probe call yields the probe parametrization and the values.
    results = {
        'T' : probe(T),
        'dvel' : probe(dvel),
    }

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    ppars, pvals = results['T']
    ax.plot(ppars, pvals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    # Pad the x-range by 5% of the probe length on both sides.
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    ppars, pvals = results['dvel']
    for icomp in range(pvals.shape[1]):
        ax.plot(ppars, pvals[:, icomp], label=r'$w_{%d}$' % (icomp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    return fig, results
# Help strings for the command line options defined in main().
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
    """
    Parse the command line options and set up the transient heat
    conduction problem on a cylinder mesh, with Dirichlet boundary
    conditions and a function-given initial condition.

    NOTE(review): this copy of main() is truncated here by a following
    duplicated copy of the file contents; attaching the conditions to the
    problem and the solver set-up are missing at this point.
    """
    from sfepy import data_dir

    # Command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()

    # At least linear elements are required.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')

    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)

    # Load the mesh and optionally refine it uniformly.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    domain = FEDomain('domain', mesh)
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

    # Whole domain and the two end faces of the cylinder.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')

    # Scalar temperature field; history=1 makes the previous time step
    # state available (used by the dT/dt term below).
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')

    # Isotropic diffusivity tensor.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))

    # Weak form: diffusion term + time derivative term.
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])

    # Dirichlet boundary conditions on the cylinder end faces.
    ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
    ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})

    # Initial conditions given as a function of the coordinates.
    def get_ic(coors, ic):
        x, y, z = coors.T
        return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
    ic_fun = Function('ic_fun', get_ic)
    ic = InitialCondition('ic', omega, {'T.0' : ic_fun})

    pb = Problem('heat', equations=eqs)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for probing the results.

    Parameters
    ----------
    problem : Problem
        The problem instance (unused here; kept for API compatibility
        with probing drivers that pass it).

    Returns
    -------
    probes : list
        The line and the circle probe instances.
    labels : list of str
        Descriptive labels matching `probes`.
    """
    # Use enough points for higher order approximations.
    n_point = 1000

    # Line probe along the x-axis of the cylinder mesh.
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)

    # Circle probe centred at the line midpoint, in the plane given by
    # the normal vector.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)

    probes = [line, circle]
    # Fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]

    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity along the given probe and
    plot the probed values into two subplots of figure 1.

    Returns the figure and a dict holding the probed (parameters, values)
    pairs under the keys 'T' and 'dvel'.
    """
    # Each probe call yields the probe parametrization and the values.
    results = {
        'T' : probe(T),
        'dvel' : probe(dvel),
    }

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    ppars, pvals = results['T']
    ax.plot(ppars, pvals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    # Pad the x-range by 5% of the probe length on both sides.
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    ppars, pvals = results['dvel']
    for icomp in range(pvals.shape[1]):
        ax.plot(ppars, pvals[:, icomp], label=r'$w_{%d}$' % (icomp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    return fig, results
# Help strings for the command line options defined in main().
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
    """
    Parse the command line options and set up the transient heat
    conduction problem on a cylinder mesh, with Dirichlet boundary
    conditions and a function-given initial condition.

    NOTE(review): this copy of main() is truncated here by a following
    duplicated copy of the file contents; the nonlinear/time-stepping
    solver set-up and the solution are missing at this point.
    """
    from sfepy import data_dir

    # Command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()

    # At least linear elements are required.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')

    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)

    # Load the mesh and optionally refine it uniformly.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    domain = FEDomain('domain', mesh)
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

    # Whole domain and the two end faces of the cylinder.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')

    # Scalar temperature field; history=1 makes the previous time step
    # state available (used by the dT/dt term below).
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')

    # Isotropic diffusivity tensor.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))

    # Weak form: diffusion term + time derivative term.
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])

    # Dirichlet boundary conditions on the cylinder end faces.
    ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
    ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})

    # Initial conditions given as a function of the coordinates.
    def get_ic(coors, ic):
        x, y, z = coors.T
        return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
    ic_fun = Function('ic_fun', get_ic)
    ic = InitialCondition('ic', omega, {'T.0' : ic_fun})

    # Assemble the problem and attach the conditions.
    pb = Problem('heat', equations=eqs)
    pb.set_bcs(ebcs=Conditions([ebc1, ebc2]))
    pb.set_ics(Conditions([ic]))

    # Initial state and the time stepping hook functions.
    state0 = pb.get_initial_state()
    init_fun, prestep_fun, _poststep_fun = pb.get_tss_functions(state0)

    # Direct linear solver.
    ls = ScipyDirect({})
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for probing the results.

    Parameters
    ----------
    problem : Problem
        The problem instance (unused here; kept for API compatibility
        with probing drivers that pass it).

    Returns
    -------
    probes : list
        The line and the circle probe instances.
    labels : list of str
        Descriptive labels matching `probes`.
    """
    # Use enough points for higher order approximations.
    n_point = 1000

    # Line probe along the x-axis of the cylinder mesh.
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)

    # Circle probe centred at the line midpoint, in the plane given by
    # the normal vector.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)

    probes = [line, circle]
    # Fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]

    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity along the given probe and
    plot the probed values into two subplots of figure 1.

    Returns the figure and a dict holding the probed (parameters, values)
    pairs under the keys 'T' and 'dvel'.
    """
    # Each probe call yields the probe parametrization and the values.
    results = {
        'T' : probe(T),
        'dvel' : probe(dvel),
    }

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    ppars, pvals = results['T']
    ax.plot(ppars, pvals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    # Pad the x-range by 5% of the probe length on both sides.
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    ppars, pvals = results['dvel']
    for icomp in range(pvals.shape[1]):
        ax.plot(ppars, pvals[:, icomp], label=r'$w_{%d}$' % (icomp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    return fig, results
# Help strings for the command line options defined in main().
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
    """
    Parse the command line options and set up the transient heat
    conduction problem on a cylinder mesh, with Dirichlet boundary
    conditions and a function-given initial condition.

    NOTE(review): this copy of main() is truncated here by a following
    duplicated copy of the file contents; the Newton/time-stepping solver
    set-up and the solution are missing at this point.
    """
    from sfepy import data_dir

    # Command line options.
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()

    # At least linear elements are required.
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')

    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)

    # Load the mesh and optionally refine it uniformly.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    domain = FEDomain('domain', mesh)
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

    # Whole domain and the two end faces of the cylinder.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')

    # Scalar temperature field; history=1 makes the previous time step
    # state available (used by the dT/dt term below).
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')

    # Isotropic diffusivity tensor.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))

    # Weak form: diffusion term + time derivative term.
    integral = Integral('i', order=2*options.order)
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])

    # Dirichlet boundary conditions on the cylinder end faces.
    ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
    ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})

    # Initial conditions given as a function of the coordinates.
    def get_ic(coors, ic):
        x, y, z = coors.T
        return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
    ic_fun = Function('ic_fun', get_ic)
    ic = InitialCondition('ic', omega, {'T.0' : ic_fun})

    # Assemble the problem and attach the conditions.
    pb = Problem('heat', equations=eqs)
    pb.set_bcs(ebcs=Conditions([ebc1, ebc2]))
    pb.set_ics(Conditions([ic]))

    # Initial state and the time stepping hook functions.
    state0 = pb.get_initial_state()
    init_fun, prestep_fun, _poststep_fun = pb.get_tss_functions(state0)

    # Direct linear solver and a status record for the nonlinear solver.
    ls = ScipyDirect({})
    nls_status = IndexedStruct()
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for probing the results.

    Parameters
    ----------
    problem : Problem
        The problem instance (unused here; kept for API compatibility
        with probing drivers that pass it).

    Returns
    -------
    probes : list
        The line and the circle probe instances.
    labels : list of str
        Descriptive labels matching `probes`.
    """
    # Use enough points for higher order approximations.
    n_point = 1000

    # Line probe along the x-axis of the cylinder mesh.
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)

    # Circle probe centred at the line midpoint, in the plane given by
    # the normal vector.
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)

    probes = [line, circle]
    # Fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]

    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Probe the temperature and diffusion velocity along the given probe and
    plot the probed values into two subplots of figure 1.

    Returns the figure and a dict holding the probed (parameters, values)
    pairs under the keys 'T' and 'dvel'.
    """
    # Each probe call yields the probe parametrization and the values.
    results = {
        'T' : probe(T),
        'dvel' : probe(dvel),
    }

    fig = plt.figure(1)

    # Left column: temperature along the probe.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    ppars, pvals = results['T']
    ax.plot(ppars, pvals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    # Pad the x-range by 5% of the probe length on both sides.
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    # Right column: one curve per diffusion velocity component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    ppars, pvals = results['dvel']
    for icomp in range(pvals.shape[1]):
        ax.plot(ppars, pvals[:, icomp], label=r'$w_{%d}$' % (icomp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (ppars[-1] - ppars[0])
    ax.set_xlim(ppars[0] - pad, ppars[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)

    return fig, results
# Help strings for the command line options defined in main().
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
    """
    Set up and solve the transient heat (Laplace) equation on a cylinder
    mesh according to the command line options, optionally probing the
    results after each time step.
    """
    from sfepy import data_dir
    # Parse command line options (help texts come from the ``helps`` dict).
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--diffusivity', metavar='float', type=float,
                        action='store', dest='diffusivity',
                        default=1e-5, help=helps['diffusivity'])
    parser.add_argument('--ic-max', metavar='float', type=float,
                        action='store', dest='ic_max',
                        default=2.0, help=helps['ic_max'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=2, help=helps['order'])
    parser.add_argument('-r', '--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-p', '--probe',
                        action="store_true", dest='probe',
                        default=False, help=helps['probe'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    options = parser.parse_args()
    assert_((0 < options.order),
            'temperature approximation order must be at least 1!')
    output('using values:')
    output(' diffusivity:', options.diffusivity)
    output(' max. IC value:', options.ic_max)
    output('uniform mesh refinement level:', options.refine)
    # Load the mesh and optionally refine it uniformly.
    mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
    domain = FEDomain('domain', mesh)
    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))
    # Regions: whole domain plus the two end faces of the cylinder.
    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left',
                                'vertices in x < 0.00001', 'facet')
    right = domain.create_region('Right',
                                 'vertices in x > 0.099999', 'facet')
    # Scalar temperature field; history=1 keeps the previous time step,
    # needed by the dT/dt term below.
    field = Field.from_args('fu', nm.float64, 'scalar', omega,
                            approx_order=options.order)
    T = FieldVariable('T', 'unknown', field, history=1)
    s = FieldVariable('s', 'test', field, primary_var_name='T')
    # Isotropic diffusivity tensor.
    m = Material('m', diffusivity=options.diffusivity * nm.eye(3))
    integral = Integral('i', order=2*options.order)
    # Weak form: diffusion term + time derivative (mass) term.
    t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
                  integral, omega, m=m, s=s, T=T)
    t2 = Term.new('dw_volume_dot(s, dT/dt)',
                  integral, omega, s=s, T=T)
    eq = Equation('balance', t1 + t2)
    eqs = Equations([eq])
    # Boundary conditions.
    ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
    ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})
    # Initial conditions.
    def get_ic(coors, ic):
        # Linear profile between the BC values plus a sine perturbation of
        # amplitude ``ic_max``; only the x coordinate is used.
        x, y, z = coors.T
        return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
    ic_fun = Function('ic_fun', get_ic)
    ic = InitialCondition('ic', omega, {'T.0' : ic_fun})
    pb = Problem('heat', equations=eqs)
    pb.set_bcs(ebcs=Conditions([ebc1, ebc2]))
    pb.set_ics(Conditions([ic]))
    state0 = pb.get_initial_state()
    init_fun, prestep_fun, _poststep_fun = pb.get_tss_functions(state0)
    # Solvers: direct linear solver, a single-iteration Newton (the problem
    # is linear), and a simple time stepping solver over [0, 100].
    ls = ScipyDirect({})
    nls_status = IndexedStruct()
    nls = Newton({'is_linear' : True}, lin_solver=ls, status=nls_status)
    tss = SimpleTimeSteppingSolver({'t0' : 0.0, 't1' : 100.0, 'n_step' : 11},
                                   nls=nls, context=pb, verbose=True)
    pb.set_solver(tss)
    if options.probe:
        # Prepare probe data.
        probes, labels = gen_probes(pb)
        ev = pb.evaluate
        order = 2 * (options.order - 1)
        # Auxiliary fields/variables to hold the diffusion velocity
        # projected from quadrature points to a nodal field.
        gfield = Field.from_args('gu', nm.float64, 'vector', omega,
                                 approx_order=options.order - 1)
        dvel = FieldVariable('dvel', 'parameter', gfield,
                             primary_var_name='(set-to-None)')
        cfield = Field.from_args('gu', nm.float64, 'scalar', omega,
                                 approx_order=options.order - 1)
        component = FieldVariable('component', 'parameter', cfield,
                                  primary_var_name='(set-to-None)')
        nls_options = {'eps_a' : 1e-16, 'i_max' : 1}
        suffix = tss.ts.suffix
        def poststep_fun(ts, vec):
            # Run the default post-step processing first (saving etc.).
            _poststep_fun(ts, vec)
            # Probe the solution.
            dvel_qp = ev('ev_diffusion_velocity.%d.Omega(m.diffusivity, T)'
                         % order, copy_materials=False, mode='qp')
            project_by_component(dvel, dvel_qp, component, order,
                                 nls_options=nls_options)
            all_results = []
            for ii, probe in enumerate(probes):
                fig, results = probe_results(ii, T, dvel, probe, labels[ii])
                all_results.append(results)
            plt.tight_layout()
            # One figure per time step, named by the step suffix.
            fig.savefig('time_poisson_interactive_probe_%s.png'
                        % (suffix % ts.step), bbox_inches='tight')
            # Report basic statistics of the probed values.
            for ii, results in enumerate(all_results):
                output('probe %d (%s):' % (ii, probes[ii].name))
                output.level += 2
                for key, res in ordered_iteritems(results):
                    output(key + ':')
                    val = res[1]
                    output(' min: %+.2e, mean: %+.2e, max: %+.2e'
                           % (val.min(), val.mean(), val.max()))
                output.level -= 2
    else:
        poststep_fun = _poststep_fun
    pb.time_update(tss.ts)
    state0.apply_ebc()
    # This is required if {'is_linear' : True} is passed to Newton.
    mtx = prepare_matrix(pb, state0)
#!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
    """
    Define a line probe and a circle probe for sampling the solution.

    Parameters
    ----------
    problem : Problem
        The problem instance (not used here; kept for a uniform signature).

    Returns
    -------
    probes : list
        The line probe and the circle probe instances.
    labels : list of str
        A human-readable label for each probe, in the same order.
    """
    # Use enough points for higher order approximations.
    n_point = 1000
    p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
    line = LineProbe(p0, p1, n_point, share_geometry=True)
    # Workaround current probe code shortcoming.
    line.set_options(close_limit=0.5)
    centre = 0.5 * (p0 + p1)
    normal = [0.0, 1.0, 0.0]
    r = 0.019
    circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
    circle.set_options(close_limit=0.0)
    probes = [line, circle]
    # Fix: the circle label was missing its closing parenthesis.
    labels = ['%s -> %s' % (p0, p1),
              'circle(%s, %s, %s)' % (centre, normal, r)]
    return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
    """
    Sample the temperature and the diffusion velocity along the given probe
    and draw the sampled values into two subplots of figure 1.

    Returns the figure and a dict mapping 'T' and 'dvel' to (parameters,
    values) pairs.
    """
    results = {}
    for key, var in (('T', T), ('dvel', dvel)):
        xs, ys = probe(var)
        results[key] = (xs, ys)
    fig = plt.figure(1)
    # Temperature subplot.
    ax = plt.subplot(2, 2, 2 * ax_num + 1)
    ax.cla()
    xs, ys = results['T']
    ax.plot(xs, ys, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (xs[-1] - xs[0])
    ax.set_xlim(xs[0] - pad, xs[-1] + pad)
    ax.set_ylabel('temperature')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)
    # Diffusion velocity subplot - one curve per vector component.
    ax = plt.subplot(2, 2, 2 * ax_num + 2)
    ax.cla()
    xs, ys = results['dvel']
    for comp in range(ys.shape[1]):
        ax.plot(xs, ys[:, comp], label=r'$w_{%d}$' % (comp + 1),
                lw=1, ls='-', marker='+', ms=3)
    pad = 0.05 * (xs[-1] - xs[0])
    ax.set_xlim(xs[0] - pad, xs[-1] + pad)
    ax.set_ylabel('diffusion velocity')
    ax.set_xlabel('probe %s' % label, fontsize=8)
    ax.legend(loc='best', fontsize=10)
    return fig, results
# Command-line option help strings; '%(default)s' is expanded by argparse.
helps = {
    'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
    'ic_max' : 'the max. initial condition value [default: %(default)s]',
    'order' : 'temperature field approximation order [default: %(default)s]',
    'refine' : 'uniform mesh refinement level [default: %(default)s]',
    'probe' : 'probe the results',
    'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
output('using values:')
output(' diffusivity:', options.diffusivity)
output(' max. IC value:', options.ic_max)
output('uniform mesh refinement level:', options.refine)
mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
domain = FEDomain('domain', mesh)
if options.refine > 0:
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
omega = domain.create_region('Omega', 'all')
left = domain.create_region('Left',
'vertices in x < 0.00001', 'facet')
right = domain.create_region('Right',
'vertices in x > 0.099999', 'facet')
field = Field.from_args('fu', nm.float64, 'scalar', omega,
approx_order=options.order)
T = FieldVariable('T', 'unknown', field, history=1)
s = FieldVariable('s', 'test', field, primary_var_name='T')
m = Material('m', diffusivity=options.diffusivity * nm.eye(3))
integral = Integral('i', order=2*options.order)
t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
integral, omega, m=m, s=s, T=T)
t2 = Term.new('dw_volume_dot(s, dT/dt)',
integral, omega, s=s, T=T)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
# Boundary conditions.
ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})
# Initial conditions.
def get_ic(coors, ic):
x, y, z = coors.T
return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
ic_fun = Function('ic_fun', get_ic)
ic = InitialCondition('ic', omega, {'T.0' : ic_fun})
pb = Problem('heat', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc1, ebc2]))
pb.set_ics(Conditions([ic]))
state0 = pb.get_initial_state()
init_fun, prestep_fun, _poststep_fun = pb.get_tss_functions(state0)
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'is_linear' : True}, lin_solver=ls, status=nls_status)
tss = SimpleTimeSteppingSolver({'t0' : 0.0, 't1' : 100.0, 'n_step' : 11},
nls=nls, context=pb, verbose=True)
pb.set_solver(tss)
if options.probe:
# Prepare probe data.
probes, labels = gen_probes(pb)
ev = pb.evaluate
order = 2 * (options.order - 1)
gfield = Field.from_args('gu', nm.float64, 'vector', omega,
approx_order=options.order - 1)
dvel = FieldVariable('dvel', 'parameter', gfield,
primary_var_name='(set-to-None)')
cfield = Field.from_args('gu', nm.float64, 'scalar', omega,
approx_order=options.order - 1)
component = FieldVariable('component', 'parameter', cfield,
primary_var_name='(set-to-None)')
nls_options = {'eps_a' : 1e-16, 'i_max' : 1}
suffix = tss.ts.suffix
def poststep_fun(ts, vec):
_poststep_fun(ts, vec)
# Probe the solution.
dvel_qp = ev('ev_diffusion_velocity.%d.Omega(m.diffusivity, T)'
% order, copy_materials=False, mode='qp')
project_by_component(dvel, dvel_qp, component, order,
nls_options=nls_options)
all_results = []
for ii, probe in enumerate(probes):
fig, results = probe_results(ii, T, dvel, probe, labels[ii])
all_results.append(results)
plt.tight_layout()
fig.savefig('time_poisson_interactive_probe_%s.png'
% (suffix % ts.step), bbox_inches='tight')
for ii, results in enumerate(all_results):
output('probe %d (%s):' % (ii, probes[ii].name))
output.level += 2
for key, res in ordered_iteritems(results):
output(key + ':')
val = res[1]
output(' min: %+.2e, mean: %+.2e, max: %+.2e'
% (val.min(), val.mean(), val.max()))
output.level -= 2
else:
poststep_fun = _poststep_fun
pb.time_update(tss.ts)
state0.apply_ebc()
# This is required if {'is_linear' : True} is passed to Newton.
mtx = prepare_matrix(pb, state0)
pb.try_presolve(mtx)
tss_status = | IndexedStruct() | sfepy.base.base.IndexedStruct |