Updating demo_axis.py and demo_pml.py for parallel computing #3433

Merged · 10 commits · Sep 26, 2024
40 changes: 28 additions & 12 deletions python/demo/demo_axis.py
@@ -27,20 +27,14 @@

import dolfinx

-if PETSc.IntType == np.int64 and MPI.COMM_WORLD.size > 1:
-    print("This solver fails with PETSc and 64-bit integers becaude of memory errors in MUMPS.")
-    # Note: when PETSc.IntType == np.int32, superlu_dist is used
-    # rather than MUMPS and does not trigger memory failures.
-    exit(0)

# The time-harmonic Maxwell equation is complex-valued PDE. PETSc
# must therefore have compiled with complex scalars.
-if not np.issubdtype(PETSc.ScalarType, np.complexfloating):
+if not np.issubdtype(PETSc.ScalarType, np.complexfloating):  # type: ignore
print("Demo can only be executed when PETSc using complex scalars.")
exit(0)

-scalar_type = PETSc.ScalarType
-real_type = PETSc.RealType
+scalar_type = PETSc.ScalarType  # type: ignore
+real_type = PETSc.RealType  # type: ignore

if not dolfinx.has_petsc:
print("This demo requires DOLFINx to be compiled with PETSc enabled.")
@@ -463,7 +457,10 @@ def create_eps_mu(pml, rho, eps_bkg, mu_bkg):
)

model = MPI.COMM_WORLD.bcast(model, root=0)
-msh, cell_tags, facet_tags = io.gmshio.model_to_mesh(model, MPI.COMM_WORLD, 0, gdim=2)
+partitioner = dolfinx.cpp.mesh.create_cell_partitioner(dolfinx.mesh.GhostMode.shared_facet)
+msh, cell_tags, facet_tags = io.gmshio.model_to_mesh(
+    model, MPI.COMM_WORLD, 0, gdim=2, partitioner=partitioner
+)

gmsh.finalize()
MPI.COMM_WORLD.barrier()
@@ -651,9 +648,28 @@ def create_eps_mu(pml, rho, eps_bkg, mu_bkg):
+ k0**2 * ufl.inner(eps_pml * Es_m, v_m) * rho * dPml
)
a, L = ufl.lhs(F), ufl.rhs(F)

-problem = LinearProblem(a, L, bcs=[], petsc_options={"ksp_type": "preonly", "pc_type": "lu"})
+sys = PETSc.Sys()  # type: ignore
+if sys.hasExternalPackage("superlu_dist"):  # type: ignore
+    mat_factor_backend = "superlu_dist"
+elif sys.hasExternalPackage("mumps"):  # type: ignore
+    mat_factor_backend = "mumps"
+else:
+    if msh.comm.size > 1:
+        raise RuntimeError("This demo requires a parallel linear algebra backend.")
+    else:
+        mat_factor_backend = "petsc"
+problem = LinearProblem(
+    a,
+    L,
+    bcs=[],
+    petsc_options={
+        "ksp_type": "preonly",
+        "pc_type": "lu",
+        "pc_factor_mat_solver_type": mat_factor_backend,
+    },
+)
Esh_m = problem.solve()
+assert problem.solver.getConvergedReason() > 0, "Solver did not converge!"

# Scattered magnetic field
Hsh_m = -1j * curl_axis(Esh_m, m, rho) / (Z0 * k0)
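Both demos make the same two changes: the Gmsh model is now distributed with ghost cells across shared facets (GhostMode.shared_facet, the mode typically needed when a form contains interior-facet integrals), and the LU factorisation backend is chosen from the external packages PETSc was actually built with. The standalone sketch below illustrates only the ghosting part on a built-in mesh; create_unit_square and its ghost_mode keyword are standard DOLFINx API used here for illustration and are not part of this PR.

# Illustrative sketch (not from the diff): request ghost cells across shared
# facets on a built-in mesh, then report how many each rank received.
from mpi4py import MPI

import dolfinx

msh = dolfinx.mesh.create_unit_square(
    MPI.COMM_WORLD, 8, 8, ghost_mode=dolfinx.mesh.GhostMode.shared_facet
)
tdim = msh.topology.dim
num_ghost_cells = msh.topology.index_map(tdim).num_ghosts
print(f"rank {msh.comm.rank}: {num_ghost_cells} ghost cells")

Run under MPI with more than one process, each rank that shares a partition boundary should report a nonzero ghost-cell count; in serial the count is zero.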
40 changes: 29 additions & 11 deletions python/demo/demo_pml.py
@@ -64,13 +64,6 @@

from petsc4py import PETSc

-if PETSc.IntType == np.int64 and MPI.COMM_WORLD.size > 1:
-    print("This solver fails with PETSc and 64-bit integers becaude of memory errors in MUMPS.")
-    # Note: when PETSc.IntType == np.int32, superlu_dist is used rather
-    # than MUMPS and does not trigger memory failures.
-    exit(0)
-# -

# Since we want to solve time-harmonic Maxwell's equation, we require
# that the demo is executed with DOLFINx (PETSc) complex mode.

@@ -121,7 +114,6 @@ def generate_mesh_wire(
scatt_tag: int,
pml_tag: int,
):
-    gmsh.model.add("nanowire")
dim = 2
# A dummy circle for setting a finer mesh
c1 = gmsh.model.occ.addCircle(0.0, 0.0, 0.0, radius_wire * 0.8, angle1=0.0, angle2=2 * np.pi)
@@ -208,7 +200,7 @@ def generate_mesh_wire(
gmsh.model.addPhysicalGroup(dim, y_group, tag=pml_tag + 2)

# Marker interior surface in bkg group
-    boundaries: list[np.tynp.ping.NDArray[np.int32]] = []
+    boundaries: list[np.typing.NDArray[np.int32]] = []
for tag in bkg_group:
boundary_pairs = gmsh.model.get_boundary([(dim, tag)], oriented=False)
boundaries.append(np.asarray([pair[1] for pair in boundary_pairs], dtype=np.int32))
@@ -457,7 +449,11 @@ def pml_coordinates(x: ufl.indexed.Indexed, alpha: float, k0: complex, l_dom: fl
pml_tag,
)
model = MPI.COMM_WORLD.bcast(model, root=0)
-msh, cell_tags, facet_tags = gmshio.model_to_mesh(model, MPI.COMM_WORLD, 0, gdim=2)
+partitioner = dolfinx.cpp.mesh.create_cell_partitioner(dolfinx.mesh.GhostMode.shared_facet)
+
+msh, cell_tags, facet_tags = gmshio.model_to_mesh(
+    model, MPI.COMM_WORLD, 0, gdim=2, partitioner=partitioner
+)

gmsh.finalize()
MPI.COMM_WORLD.barrier()
@@ -699,8 +695,30 @@ def create_eps_mu(

a, L = ufl.lhs(F), ufl.rhs(F)

-problem = LinearProblem(a, L, bcs=[], petsc_options={"ksp_type": "preonly", "pc_type": "lu"})
+# For factorisation prefer superlu_dist, then MUMPS, then default
+sys = PETSc.Sys()  # type: ignore
+if sys.hasExternalPackage("superlu_dist"):  # type: ignore
+    mat_factor_backend = "superlu_dist"
+elif sys.hasExternalPackage("mumps"):  # type: ignore
+    mat_factor_backend = "mumps"
+else:
+    if msh.comm.size > 1:
+        raise RuntimeError("This demo requires a parallel linear algebra backend.")
+    else:
+        mat_factor_backend = "petsc"
+
+problem = LinearProblem(
+    a,
+    L,
+    bcs=[],
+    petsc_options={
+        "ksp_type": "preonly",
+        "pc_type": "lu",
+        "pc_factor_mat_solver_type": mat_factor_backend,
+    },
+)
Esh = problem.solve()
+assert problem.solver.getConvergedReason() > 0, "Solver did not converge!"
# -

# Let's now save the solution in a `bp`-file. In order to do so, we need
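The remaining lines are collapsed in the diff view. For orientation, here is a minimal sketch of what writing a field to a `bp` file typically looks like in DOLFINx; it assumes the demo's `msh` and `Esh` from above, and the output space, degree, and file name are illustrative rather than taken from the collapsed code.

# Illustrative sketch (assumed, not read from the collapsed diff): interpolate
# the curl-conforming solution into a discontinuous vector Lagrange space,
# which VTX output supports, and write it to a .bp file readable by ParaView.
from dolfinx import fem, io

gdim = msh.geometry.dim
V_dg = fem.functionspace(msh, ("Discontinuous Lagrange", 3, (gdim,)))
Esh_dg = fem.Function(V_dg)
Esh_dg.interpolate(Esh)

with io.VTXWriter(msh.comm, "Esh.bp", Esh_dg) as vtx:
    vtx.write(0.0)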