In this tutorial we solve the problem
$$\begin{cases} -\Delta u = f, & \text{in } \Omega,\\ \nabla u \cdot\mathbf{n} = g, & \text{on } \partial\Omega, \end{cases}$$
where $\Omega$ is a ball in 2D. The forcing term $f$ and the Neumann datum $g$ are chosen so that they satisfy the compatibility condition $$\int_\Omega f \; dx = - \int_{\partial\Omega} g \; ds, $$ which is a necessary condition for the existence of a solution to this pure Neumann problem, and which is easily obtained by multiplying the first equation in the system by the constant 1 and integrating by parts. Note that the solution is determined only up to a constant.
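In detail, the compatibility condition follows by testing the first equation against the constant function $v \equiv 1$ and integrating by parts: $$\int_\Omega f \; dx = - \int_\Omega \Delta u \; dx = - \int_{\partial\Omega} \nabla u \cdot \mathbf{n} \; ds = - \int_{\partial\Omega} g \; ds.$$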
The domain $\Omega$ is decomposed as $\Omega = \Omega_1 \cup \Omega_2$, with $\Gamma$ denoting the interface between the two subdomains, and $f$ is assumed to be constant on each of $\Omega_1$ and $\Omega_2$. This tutorial shows how to create the null space required by the pure Neumann boundary conditions on $\partial\Omega$, first in a case without domain decomposition and then in a case with domain decomposition (i.e., with restrictions).
import typing
import dolfinx.fem
import dolfinx.fem.petsc
import dolfinx.io
import dolfinx.la
import dolfinx.mesh
import gmsh
import mpi4py.MPI
import numpy as np
import petsc4py.PETSc
import ufl
import viskex
import multiphenicsx.fem
import multiphenicsx.fem.petsc
r = 3
mesh_size = 1. / 4.
gmsh.initialize()
gmsh.model.add("mesh")
p0 = gmsh.model.geo.addPoint(0.0, 0.0, 0.0, mesh_size)
p1 = gmsh.model.geo.addPoint(0.0, +r, 0.0, mesh_size)
p2 = gmsh.model.geo.addPoint(0.0, -r, 0.0, mesh_size)
c0 = gmsh.model.geo.addCircleArc(p1, p0, p2)
c1 = gmsh.model.geo.addCircleArc(p2, p0, p1)
l0 = gmsh.model.geo.addLine(p2, p1)
line_loop_left = gmsh.model.geo.addCurveLoop([c0, l0])
line_loop_right = gmsh.model.geo.addCurveLoop([c1, -l0])
semicircle_left = gmsh.model.geo.addPlaneSurface([line_loop_left])
semicircle_right = gmsh.model.geo.addPlaneSurface([line_loop_right])
gmsh.model.geo.synchronize()
gmsh.model.addPhysicalGroup(1, [c0], 1)
gmsh.model.addPhysicalGroup(1, [c1], 2)
gmsh.model.addPhysicalGroup(1, [l0], 3)
gmsh.model.addPhysicalGroup(2, [semicircle_left], 1)
gmsh.model.addPhysicalGroup(2, [semicircle_right], 2)
gmsh.model.mesh.generate(2)
partitioner = dolfinx.mesh.create_cell_partitioner(dolfinx.mesh.GhostMode.shared_facet)
mesh, subdomains, boundaries_and_interfaces = dolfinx.io.gmshio.model_to_mesh(
gmsh.model, comm=mpi4py.MPI.COMM_WORLD, rank=0, gdim=2, partitioner=partitioner)
gmsh.finalize()
# Create connectivities required by the rest of the code
mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
mesh.topology.create_connectivity(mesh.topology.dim, mesh.topology.dim)
cells_Omega1 = subdomains.indices[subdomains.values == 1]
cells_Omega2 = subdomains.indices[subdomains.values == 2]
facets_Gamma = boundaries_and_interfaces.indices[boundaries_and_interfaces.values == 3]
# Prepare integration entities for the interior facet integrals on Gamma.
# Each facet is stored as a pair (cell, local facet index) on either side.
integration_entities_on_Gamma = dolfinx.fem.compute_integration_domains(
    dolfinx.fem.IntegralType.interior_facet, mesh.topology, facets_Gamma)
integration_entities_on_Gamma_reshaped = integration_entities_on_Gamma.reshape(-1, 4)
connected_cells_to_Gamma = integration_entities_on_Gamma_reshaped[:, [0, 2]]
# Reorder the pairs so that the first cell of each pair lies in the subdomain
# with the larger tag: this fixes a consistent orientation of the "+" and "-"
# restrictions across Gamma.
subdomain_ordering = (
    subdomains.values[connected_cells_to_Gamma[:, 0]] < subdomains.values[connected_cells_to_Gamma[:, 1]])
if len(subdomain_ordering) > 0 and any(subdomain_ordering):
    integration_entities_on_Gamma_reshaped[subdomain_ordering] = integration_entities_on_Gamma_reshaped[
        subdomain_ordering][:, [2, 3, 0, 1]]
integration_entities_on_Gamma = integration_entities_on_Gamma_reshaped.flatten()
# Define associated measures
dx = ufl.Measure("dx", subdomain_data=subdomains)
ds = ufl.Measure("ds", subdomain_data=boundaries_and_interfaces)
dS = ufl.Measure("dS", domain=mesh, subdomain_data=[(3, np.array(integration_entities_on_Gamma, dtype=np.int32))])
dS = dS(3) # restrict to the interface, which has facet ID equal to 3
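As a quick sanity check of the reordering above (a sketch, not part of the original code, and relying on the DOLFINx convention that the "+" restriction is taken from the first cell of each integration entity), we can verify that the "+" side of every facet on $\Gamma$ lies in $\Omega_2$:
# Sanity check: after the reordering, the first cell of each integration entity
# on Gamma carries the larger subdomain tag, i.e. the "+" side of dS(3) is in Omega_2.
reordered_pairs = integration_entities_on_Gamma.reshape(-1, 4)
assert np.all(subdomains.values[reordered_pairs[:, 0]] >= subdomains.values[reordered_pairs[:, 2]])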
viskex.dolfinx.plot_mesh(mesh)
viskex.dolfinx.plot_mesh_tags(mesh, subdomains, "subdomains")
viskex.dolfinx.plot_mesh_tags(mesh, boundaries_and_interfaces, "boundaries and interfaces")
Note that this part does not actually require multiphenicsx.
# Create finite element space
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 2))
# Define trial and test functions
u = ufl.TrialFunction(V)
v = ufl.TestFunction(V)
Denote by $r$ the radius of the circular domain $\Omega$. We next define the forcing term $f$ as $$f = \begin{cases}1, & \text{in }\Omega_1,\\ 2, & \text{in }\Omega_2, \end{cases}$$ and assume that the boundary datum $g$ is constant on $\partial\Omega$. Imposing the necessary condition for existence, one finds $$ g = - \frac{3}{4} r, \qquad \text{on }\partial\Omega.$$ The following cells verify the validity of the necessary condition, up to a tolerance proportional to the mesh size.
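Indeed, since $|\Omega_1| = |\Omega_2| = \frac{\pi r^2}{2}$ and $|\partial\Omega| = 2 \pi r$, the compatibility condition becomes $$ 1 \cdot \frac{\pi r^2}{2} + 2 \cdot \frac{\pi r^2}{2} = - g \, (2 \pi r) \quad\Longrightarrow\quad g = - \frac{3}{4} r.$$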
one = dolfinx.fem.Constant(mesh, petsc4py.PETSc.ScalarType(1))
area_Omega1 = mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(one * dx(1))), op=mpi4py.MPI.SUM)
area_Omega2 = mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(one * dx(2))), op=mpi4py.MPI.SUM)
length_partial_Omega = mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(one * ds)), op=mpi4py.MPI.SUM)
assert np.isclose(area_Omega1, np.pi * r**2 / 2, atol=1e-1)
assert np.isclose(area_Omega2, np.pi * r**2 / 2, atol=1e-1)
assert np.isclose(length_partial_Omega, 2 * np.pi * r, atol=1e-2)
f1 = 1.0
f2 = 2.0
g = - 3.0 / 4.0 * r
assert np.isclose(f1 * area_Omega1 + f2 * area_Omega2, - g * length_partial_Omega, atol=1e-1)
The weak formulation of the problem is therefore: $$ \text{find }u \in V(\Omega) := H^1(\Omega) $$ s.t. $$ \int_{\Omega} \nabla u \cdot \nabla v \; dx = \int_{\Omega} f \; v \; dx + \int_{\partial\Omega} g \; v \; ds, \qquad \forall v \in V(\Omega).$$
# Define problem forms
a = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx
f = ufl.inner(f1, v) * dx(1) + ufl.inner(f2, v) * dx(2) + ufl.inner(g, v) * ds
a_cpp = dolfinx.fem.form(a)
f_cpp = dolfinx.fem.form(f)
# Assemble the discrete system
A = dolfinx.fem.petsc.assemble_matrix(a_cpp)
A.assemble()
F = dolfinx.fem.petsc.assemble_vector(f_cpp)
F.ghostUpdate(addv=petsc4py.PETSc.InsertMode.ADD, mode=petsc4py.PETSc.ScatterMode.REVERSE)
However, the formulation above still needs to account for the fact that the solution is determined only up to a constant. In other words, the matrix A has a null space associated to the finite element representation of every constant function.
c = dolfinx.fem.Function(V)
c.interpolate(lambda x: np.ones(x.shape[1]))
C = c.x.petsc_vec
assert np.allclose(C.array, 1.0) # because V is a Lagrange space
# MatNullSpaceCreate expects the vectors to be orthonormal, which in this case simply
# means that we should normalize the vector C
C.scale(1 / C.norm())
assert np.isclose(C.norm(), 1.0)
# Create the PETSc nullspace vector and check that it is a valid nullspace of A
nullspace = petsc4py.PETSc.NullSpace().create(vectors=[C], comm=mesh.comm)
assert nullspace.test(A)
# For convenience, we explicitly inform PETSc that A is symmetric, so that it automatically
# sets the nullspace of A^T too (see the documentation of MatSetNullSpace).
A.setOption(petsc4py.PETSc.Mat.Option.SYMMETRIC, True)
A.setOption(petsc4py.PETSc.Mat.Option.SYMMETRY_ETERNAL, True)
# Set the nullspace
A.setNullSpace(nullspace)
The documentation of PETSc suggests orthogonalizing F to the null space of A^T. We note that this is theoretically unnecessary here, because $g$ has been chosen to satisfy the compatibility condition with $f$. Still, we carry out the orthogonalization anyway because, in practice, the finite element vector F does have a component in the null space of A^T, e.g. because the mesh does not represent the circle exactly.
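Before removing it, we can inspect the magnitude of this spurious component (a sketch, not part of the original tutorial; C is the normalized constant vector constructed above):
# Component of F along the null space vector C: it would vanish in exact
# arithmetic on a perfectly circular domain, but is small and non-zero here.
print("Null space component of F before orthogonalization:", F.dot(C))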
# Orthogonalize F to the null space of A^T
nullspace.remove(F)
We next configure a direct solver with MUMPS to solve the linear system. Note that MUMPS requires two options to be set explicitly in the case of singular linear systems.
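For reference, the two MUMPS options set below via setMumpsIcntl could equivalently be set through the PETSc options database before ksp.setFromOptions() is called (a sketch, assuming the KSP uses no custom options prefix):
# Hypothetical alternative to the explicit setMumpsIcntl calls used below.
opts = petsc4py.PETSc.Options()
opts["mat_mumps_icntl_24"] = 1  # detect null pivots
opts["mat_mumps_icntl_25"] = 0  # do not compute the null space again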
solution = dolfinx.fem.Function(V)
ksp = petsc4py.PETSc.KSP()
ksp.create(mesh.comm)
ksp.setOperators(A)
ksp.setType("preonly")
ksp.getPC().setType("lu")
ksp.getPC().setFactorSolverType("mumps")
ksp.getPC().setFactorSetUpSolverType()
ksp.getPC().getFactorMatrix().setMumpsIcntl(icntl=24, ival=1) # detect null pivots
ksp.getPC().getFactorMatrix().setMumpsIcntl(icntl=25, ival=0) # do not compute null space again
ksp.setFromOptions()
ksp.solve(F, solution.x.petsc_vec)
solution.x.petsc_vec.ghostUpdate(
addv=petsc4py.PETSc.InsertMode.INSERT, mode=petsc4py.PETSc.ScatterMode.FORWARD)
ksp.destroy()
viskex.dolfinx.plot_scalar_field(solution, "u")
We finally note that setting the null space is not equivalent to imposing the constraint $$\int_\Omega u \; dx = 0$$ as in the "Singular Poisson" demo in legacy FEniCS. In other words, the linear solver fixes the undetermined constant in a way such that the solution does not necessarily have zero average. If interested in enforcing the zero average constraint, the user can postprocess the obtained solution by simply subtracting its average.
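In formulas, denoting by $|\Omega| = |\Omega_1| + |\Omega_2|$ the area of the domain, the postprocessing implemented by the helper functions below is $$ \bar{u} = \frac{1}{|\Omega|} \int_\Omega u \; dx, \qquad u \leftarrow u - \bar{u}.$$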
def compute_average( # type: ignore[no-any-unimported]
u: typing.Union[dolfinx.fem.Function, tuple[dolfinx.fem.Function, dolfinx.fem.Function]]
) -> petsc4py.PETSc.ScalarType:
"""Compute average of the solution."""
if not isinstance(u, tuple):
u = (u, u)
else:
assert len(u) == 2
return mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(u[0] * dx(1) + u[1] * dx(2))), op=mpi4py.MPI.SUM
) / (area_Omega1 + area_Omega2)
def subtract_average( # type: ignore[no-any-unimported]
u: dolfinx.fem.Function, average_u: petsc4py.PETSc.ScalarType,
active_dofs: typing.Optional[np.typing.NDArray[np.int32]] = None
) -> None:
"""Post-process the solution so that it has zero average."""
with u.x.petsc_vec.localForm() as u_local:
if active_dofs is None:
u_local[:] -= average_u
else:
u_local[active_dofs] -= average_u
average_u = compute_average(solution)
assert np.isclose(average_u, 0.11117, atol=1e-5)
subtract_average(solution, average_u)
assert np.isclose(compute_average(solution), 0.0)
del average_u
We next perform a domain decomposition of $\Omega$ as $\Omega_1 \cup \Omega_2$. Similarly to the interface example in tutorial 03, we need to introduce a Lagrange multiplier to enforce the continuity of the solution across the interface $\Gamma$ between $\Omega_1$ and $\Omega_2$.
The resulting weak formulation is: $$ \text{find }u_1 \in V(\Omega_1), u_2 \in V(\Omega_2), \lambda \in E(\Gamma) \subset L^2(\Gamma) $$ s.t. $$ \begin{align*} \int_{\Omega_1} \nabla u_1 \cdot \nabla v_1 \; dx + \int_{\Gamma} \lambda \; v_1 \; ds = \int_{\Omega_1} f \; v_1 \; dx + \int_{\partial\Omega_1 \setminus \Gamma} g \; v_1 \; ds, & \qquad \forall v_1 \in V(\Omega_1)\\ \int_{\Omega_2} \nabla u_2 \cdot \nabla v_2 \; dx - \int_{\Gamma} \lambda \; v_2 \; ds = \int_{\Omega_2} f \; v_2 \; dx + \int_{\partial\Omega_2 \setminus \Gamma} g \; v_2 \; ds, & \qquad \forall v_2 \in V(\Omega_2)\\ \int_{\Gamma} \eta \; (u_1 - u_2) \; ds = 0, & \qquad \forall \eta \in E(\Gamma). \end{align*} $$
Also in this case, the solutions $u_1$ and $u_2$ are determined up to a constant.
# Define function spaces
V1 = V.clone()
V2 = V.clone()
M = V.clone()
# Define restrictions
dofs_V1_Omega1 = dolfinx.fem.locate_dofs_topological(V1, subdomains.dim, cells_Omega1)
dofs_V2_Omega2 = dolfinx.fem.locate_dofs_topological(V2, subdomains.dim, cells_Omega2)
dofs_M_Gamma = dolfinx.fem.locate_dofs_topological(M, boundaries_and_interfaces.dim, facets_Gamma)
restriction_V1_Omega1 = multiphenicsx.fem.DofMapRestriction(V1.dofmap, dofs_V1_Omega1)
restriction_V2_Omega2 = multiphenicsx.fem.DofMapRestriction(V2.dofmap, dofs_V2_Omega2)
restriction_M_Gamma = multiphenicsx.fem.DofMapRestriction(M.dofmap, dofs_M_Gamma)
restriction = [restriction_V1_Omega1, restriction_V2_Omega2, restriction_M_Gamma]
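As an informal illustration (a sketch, not part of the original tutorial; counts are per process and mesh dependent), each restriction retains only the subset of DOFs of its parent space located on the corresponding subdomain or interface:
# Compare the local DOF count of the parent space with the number of DOFs
# retained by each restriction (local indices, including ghosts).
num_local_dofs_V = V.dofmap.index_map.size_local + V.dofmap.index_map.num_ghosts
print("DOFs in V:", num_local_dofs_V)
print("DOFs kept by the restrictions:", len(dofs_V1_Omega1), len(dofs_V2_Omega2), len(dofs_M_Gamma))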
# Define trial and test functions
(u1, u2, l) = (ufl.TrialFunction(V1), ufl.TrialFunction(V2), ufl.TrialFunction(M))
(v1, v2, m) = (ufl.TestFunction(V1), ufl.TestFunction(V2), ufl.TestFunction(M))
# Define problem block forms
zero = dolfinx.fem.Constant(mesh, petsc4py.PETSc.ScalarType(0))
a_dd = [[ufl.inner(ufl.grad(u1), ufl.grad(v1)) * dx(1), None, ufl.inner(l("-"), v1("-")) * dS],
[None, ufl.inner(ufl.grad(u2), ufl.grad(v2)) * dx(2), - ufl.inner(l("+"), v2("+")) * dS],
[ufl.inner(u1("-"), m("-")) * dS, - ufl.inner(u2("+"), m("+")) * dS, None]]
f_dd = [ufl.inner(f1, v1) * dx(1) + ufl.inner(g, v1) * ds(1),
ufl.inner(f2, v2) * dx(2) + ufl.inner(g, v2) * ds(2),
ufl.inner(zero, m("-")) * dS]
a_dd_cpp = dolfinx.fem.form(a_dd)
f_dd_cpp = dolfinx.fem.form(f_dd)
# Assemble the block linear system
A_dd = multiphenicsx.fem.petsc.assemble_matrix_block(a_dd_cpp, restriction=(restriction, restriction))
A_dd.assemble()
F_dd = multiphenicsx.fem.petsc.assemble_vector_block(f_dd_cpp, a_dd_cpp, restriction=restriction)
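For illustration (a sketch, not in the original tutorial), the size of the restricted block matrix can be compared with that of the unrestricted matrix A from the first part: each block only carries the DOFs on its own subdomain or interface, rather than all DOFs of its parent space.
# Global sizes of the single-domain matrix and of the restricted block matrix.
print("A size:", A.getSize(), "A_dd size:", A_dd.getSize())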
We then set a null space which specifies that $u_1$ and $u_2$ are determined up to constants $c_1$ and $c_2$, respectively. The null space will consist of a block vector C_dd, whose entries are grouped in the following way: first, the DOFs associated to $V_1$, then the DOFs associated to $V_2$, and finally the DOFs associated to $M$. The block vector is initialized to zero, and its non-zero entries are constructed as follows:
1. copy the entries of C associated to every DOF of $V$ in $\Omega_1$ into the corresponding DOFs of $V_1$ in the first block of C_dd;
2. copy the entries of C associated to every DOF of $V$ in $\Omega_2$ into the corresponding DOFs of $V_2$ in the second block of C_dd;
3. leave the remaining entries of C_dd, associated to $M$, equal to zero.
This does indeed construct a null space for the block system because, since C was a vector with all entries equal to the same value, the non-zero entries of C_dd filled as a result of 1 have the same value as the non-zero entries filled as a result of 2. This implies that $c_1$ is actually equal to $c_2$, and therefore the left-hand side of the third equation, $\int_{\Gamma} \eta \; (u_1 - u_2) \; ds$, is equal to zero when $u_1 = c_1 = c_2 = u_2$.
C_dd = multiphenicsx.fem.petsc.create_vector_block(f_dd_cpp, restriction=restriction)
with multiphenicsx.fem.petsc.BlockVecSubVectorWrapper(
C_dd, [V1.dofmap, V2.dofmap, M.dofmap], restriction) as C_dd_wrapper:
for C_dd_component_local, data_vector in zip(C_dd_wrapper, (C, C, None)):
if data_vector is not None: # skip third block
with data_vector.localForm() as data_vector_local:
C_dd_component_local[:] = data_vector_local
Then, create a null space using the same approach as in the case without domain decomposition.
# MatNullSpaceCreate expects the vectors to be orthonormal
C_dd.scale(1 / C_dd.norm())
assert np.isclose(C_dd.norm(), 1.0)
# Create the PETSc nullspace vector and check that it is a valid nullspace of A_dd
nullspace_dd = petsc4py.PETSc.NullSpace().create(vectors=[C_dd], comm=mesh.comm)
assert nullspace_dd.test(A_dd)
# Inform PETSc that A_dd is symmetric
A_dd.setOption(petsc4py.PETSc.Mat.Option.SYMMETRIC, True)
A_dd.setOption(petsc4py.PETSc.Mat.Option.SYMMETRY_ETERNAL, True)
# Set the nullspace
A_dd.setNullSpace(nullspace_dd)
# Orthogonalize F_dd to the null space of A_dd^T
nullspace_dd.remove(F_dd)
Finally, solve the system with the same solver as in the case without domain decomposition.
# Solve
u1u2l_dd = multiphenicsx.fem.petsc.create_vector_block(f_dd_cpp, restriction=restriction)
ksp = petsc4py.PETSc.KSP()
ksp.create(mesh.comm)
ksp.setOperators(A_dd)
ksp.setType("preonly")
ksp.getPC().setType("lu")
ksp.getPC().setFactorSolverType("mumps")
ksp.getPC().setFactorSetUpSolverType()
ksp.getPC().getFactorMatrix().setMumpsIcntl(icntl=24, ival=1)
ksp.getPC().getFactorMatrix().setMumpsIcntl(icntl=25, ival=0)
ksp.setFromOptions()
ksp.solve(F_dd, u1u2l_dd)
u1u2l_dd.ghostUpdate(
addv=petsc4py.PETSc.InsertMode.INSERT, mode=petsc4py.PETSc.ScatterMode.FORWARD)
ksp.destroy()
# Split the block solution in components
(u1_dd, u2_dd, l_dd) = (dolfinx.fem.Function(V1), dolfinx.fem.Function(V2), dolfinx.fem.Function(M))
with multiphenicsx.fem.petsc.BlockVecSubVectorWrapper(
u1u2l_dd, [V1.dofmap, V2.dofmap, M.dofmap], restriction) as u1u2l_wrapper:
for u1u2l_wrapper_local, component in zip(u1u2l_wrapper, (u1_dd, u2_dd, l_dd)):
with component.x.petsc_vec.localForm() as component_local:
component_local[:] = u1u2l_wrapper_local
u1u2l_dd.destroy()
viskex.dolfinx.plot_scalar_field(u1_dd, "u1")
viskex.dolfinx.plot_scalar_field(u2_dd, "u2")
viskex.dolfinx.plot_scalar_field(l_dd, "l")
In order to compare with $u$ in the final section of this notebook, we subtract the average of the combination of $u_1$ and $u_2$, since there is no guarantee that the solutions without and with domain decomposition fix the same constant.
average_u_dd = compute_average((u1_dd, u2_dd))
assert np.isclose(average_u_dd, 0.09797, atol=1e-5)
subtract_average(u1_dd, average_u_dd, dofs_V1_Omega1)
subtract_average(u2_dd, average_u_dd, dofs_V2_Omega2)
assert np.isclose(compute_average((u1_dd, u2_dd)), 0.0)
del average_u_dd
u_norm_Omega1 = np.sqrt(mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(ufl.inner(solution, solution) * dx(1))),
op=mpi4py.MPI.SUM))
u_norm_Omega2 = np.sqrt(mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(ufl.inner(solution, solution) * dx(2))),
op=mpi4py.MPI.SUM))
err1_norm_dd = np.sqrt(mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(ufl.inner(solution - u1_dd, solution - u1_dd) * dx(1))),
op=mpi4py.MPI.SUM))
err2_norm_dd = np.sqrt(mesh.comm.allreduce(
dolfinx.fem.assemble_scalar(dolfinx.fem.form(ufl.inner(solution - u2_dd, solution - u2_dd) * dx(2))),
op=mpi4py.MPI.SUM))
print("Relative error on subdomain 1", err1_norm_dd / u_norm_Omega1)
print("Relative error on subdomain 2", err2_norm_dd / u_norm_Omega2)
assert np.isclose(err1_norm_dd / u_norm_Omega1, 0., atol=3.e-5)
assert np.isclose(err2_norm_dd / u_norm_Omega2, 0., atol=3.e-5)
Relative error on subdomain 1 1.7748890630074804e-05
Relative error on subdomain 2 2.1324721787908028e-05