My goal is to solve problems on two different meshes in parallel. Presumably this involves splitting the MPI_COMM_WORLD communicator and passing the resulting sub-communicator to the UnitCubeMesh constructor.
For some reason this leads to one of the split sub-communicators never finishing its solve.
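In short, the idea boils down to the following condensed sketch (just the communicator split and mesh construction taken from the full code further down; it assumes a DOLFIN version whose UnitCubeMesh constructor accepts an MPI communicator as its first argument):
#include <mpi.h>
#include <dolfin.h>
using namespace dolfin;

int main()
{
  MPI_Init(NULL, NULL);

  int worldRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

  // Split MPI_COMM_WORLD into two groups by rank parity
  MPI_Comm newComm;
  MPI_Comm_split(MPI_COMM_WORLD, worldRank % 2, worldRank, &newComm);

  // Each group builds (and should later solve on) its own mesh
  UnitCubeMesh mesh(newComm, 16, 16, 16);

  // ... define forms and solve on newComm only ...

  MPI_Comm_free(&newComm);
  return 0;
}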
This is the output that I get:
mpirun -np 3 ./demo_poisson
WORLD RANK/SIZE: 0/3 ROW RANK/SIZE: 0/2
WORLD RANK/SIZE: 1/3 ROW RANK/SIZE: 0/1
WORLD RANK/SIZE: 2/3 ROW RANK/SIZE: 1/2
Process 0: Number of global vertices: 27
Process 0: Number of global cells: 48
Mesh MPI opinion: 2 0 1
Mesh MPI opinion: 2 1 0
Mesh MPI opinion: 1 0 0
Process 1: Solving linear variational problem.
Process 2: Solving linear variational problem.
Process 0: Solving linear variational problem.
Process 1: Applying boundary conditions to linear system.
Time taken by processor 1: 0.052966
It then hangs indefinitely.
When running with mpirun -np 2 ./demo_poisson it does not hang; any process count larger than 3 causes PETSc to error with code 63.
Am I doing something wrong?
This is my full code:
main.cpp
#include <dolfin.h>
#include "Poisson.h"
using namespace dolfin;
// Source term (right-hand side)
class Source : public Expression
{
  void eval(Array<double>& values, const Array<double>& x) const
  {
    double dx = x[0] - 0.5;
    double dy = x[1] - 0.5;
    values[0] = 10*exp(-(dx*dx + dy*dy) / 0.02);
  }
};

// Normal derivative (Neumann boundary condition)
class dUdN : public Expression
{
  void eval(Array<double>& values, const Array<double>& x) const
  {
    values[0] = sin(5*x[0]);
  }
};

// Sub domain for Dirichlet boundary condition
class DirichletBoundary : public SubDomain
{
  bool inside(const Array<double>& x, bool on_boundary) const
  {
    return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS;
  }
};
int main()
{
  // Initialise MPI
  MPI_Init(NULL, NULL);
  set_log_level(PROGRESS);

  // Create a new MPI communicator
  int worldRank, worldSize;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
  MPI_Comm_size(MPI_COMM_WORLD, &worldSize);

  // Determine color based on rank
  int color = worldRank % 2;

  // Split the communicator based on the color and use the
  // original rank for ordering
  MPI_Comm newComm;
  MPI_Comm_split(MPI_COMM_WORLD, color, worldRank, &newComm);
  int newRank, newSize;
  MPI_Comm_rank(newComm, &newRank);
  MPI_Comm_size(newComm, &newSize);
  printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n",
         worldRank, worldSize, newRank, newSize);

  // Use a different mesh for each communicator
  int ePerSide = 2;
  if (newRank != worldRank){
    ePerSide = 16;
  }

  // Create mesh and function space
  UnitCubeMesh mesh(newComm, ePerSide, ePerSide, ePerSide);
  printf("Mesh MPI opinion: %d %d %d\n", MPI::size(mesh.mpi_comm()),
         MPI::is_receiver(mesh.mpi_comm()), MPI::is_broadcaster(mesh.mpi_comm()));
  Poisson::FunctionSpace V(mesh);

  // Define boundary condition
  Constant u0(0.0);
  DirichletBoundary boundary;
  DirichletBC bc(V, u0, boundary);

  // Define variational forms
  Poisson::BilinearForm a(V, V);
  Poisson::LinearForm L(V);
  Source f;
  dUdN g;
  L.f = f;
  L.g = g;

  // Compute solution
  Function u(V);
  double startTime = MPI_Wtime();
  LinearVariationalProblem problem(a, L, u, bc);
  LinearVariationalSolver solver(problem);
  solver.parameters["linear_solver"] = "cg";
  solver.solve();
  double endTime = MPI_Wtime();
  printf("Time taken by processor %d: %f\n", worldRank, endTime - startTime);

  MPI_Comm_free(&newComm);
  MPI_Barrier(MPI_COMM_WORLD);
  return 0;
}
Poisson.ufl
element = FiniteElement("Lagrange", tetrahedron, 1)
u = TrialFunction(element)
v = TestFunction(element)
f = Coefficient(element)
g = Coefficient(element)
a = inner(grad(u), grad(v))*dx
L = f*v*dx + g*v*ds