
Speed decrease when using 'snes' nonlinear solver

+2 votes

Hi all, I see a significant speed decrease when I switch to using 'snes' as my nonlinear solver, and I am wondering why. Can anyone enlighten me?
The code below is a modification of the Cahn-Hilliard demo. I run it with the 'time' command and consistently see 'snes' increase the runtime by ~10% (a sketch of the explicit switch and an in-script timing alternative follows the listing).
Thanks!

import random
from dolfin import *

# Class representing the initial conditions
class InitialConditions(Expression):
    def __init__(self):
        random.seed(2 + MPI.process_number())
    def eval(self, values, x):
        values[0] = 0.63 + 0.02*(0.5 - random.random())
        values[1] = 0.0
    def value_shape(self):
        return (2,)

# Class for interfacing with the Newton solver (kept from the original demo; not used below)
class CahnHilliardEquation(NonlinearProblem):
    def __init__(self, a, L):
        NonlinearProblem.__init__(self)
        self.L = L
        self.a = a
        self.reset_sparsity = True
    def F(self, b, x):
        assemble(self.L, tensor=b)
    def J(self, A, x):
        assemble(self.a, tensor=A, reset_sparsity=self.reset_sparsity)
        self.reset_sparsity = False

# Model parameters
lmbda  = 1.0e-02  # surface parameter
dt     = 5.0e-06  # time step
theta  = 0.5      # time stepping family, e.g. theta=1 -> backward Euler, theta=0.5 -> Crank-Nicolson

# Form compiler options
parameters["form_compiler"]["optimize"]     = True
parameters["form_compiler"]["cpp_optimize"] = True
parameters["form_compiler"]["representation"] = "quadrature"

# Create mesh and define function spaces
mesh = UnitSquareMesh(96, 96)
V = FunctionSpace(mesh, "Lagrange", 1)
ME = V*V

# Define trial and test functions
du    = TrialFunction(ME)
q, v  = TestFunctions(ME)

# Define functions
u   = Function(ME)  # current solution
u0  = Function(ME)  # solution from previous converged step

# Split mixed functions
dc, dmu = split(du)
c,  mu  = split(u)
c0, mu0 = split(u0)

# Create initial conditions and interpolate
u_init = InitialConditions()
u.interpolate(u_init)
u0.interpolate(u_init)

# Compute the chemical potential df/dc
c = variable(c)
f    = 100*c**2*(1-c)**2
dfdc = diff(f, c)

# mu_(n+theta)
mu_mid = (1.0-theta)*mu0 + theta*mu

# Weak statement of the equations
L0 = c*q*dx - c0*q*dx + dt*dot(grad(mu_mid), grad(q))*dx
L1 = mu*v*dx - dfdc*v*dx - lmbda*dot(grad(c), grad(v))*dx
L = L0 + L1

# Compute directional derivative about u in the direction of du (Jacobian)
a = derivative(L, u, du)

problem = NonlinearVariationalProblem(L, u, [], a)
solver = NonlinearVariationalSolver(problem)
#solver.parameters["nonlinear_solver"] = "snes"
solver.parameters["newton_solver"]["linear_solver"] = "gmres"
#info(solver.parameters, verbose=True)

# Output file for the solution
file = File("output.pvd", "compressed")

# Step in time
t = 0.0
T = 50*dt
while (t < T):
    t += dt
    u0.vector()[:] = u.vector()
    solver.solve() 
    file << (u.split()[0], t)
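
For reference, the 'snes' run is just the same script with the commented nonlinear_solver line enabled. Here is a sketch of the explicit switch, plus an in-script alternative to the external 'time' command, used in place of the stepping loop above (I'm assuming the "snes_solver" sub-parameter names here; verify them against the info output):

import time

# Assumed switch to PETSc SNES, reusing the same Krylov solver as the Newton run
solver.parameters["nonlinear_solver"] = "snes"
solver.parameters["snes_solver"]["linear_solver"] = "gmres"

# Rough wall-clock timing of the stepping loop (standard library only)
tic = time.time()
t = 0.0
T = 50*dt
while (t < T):
    t += dt
    u0.vector()[:] = u.vector()
    solver.solve()
    file << (u.split()[0], t)
print("Time in stepping loop: %.2f s" % (time.time() - tic))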
asked May 5, 2014 by mwelland FEniCS User (8,410 points)

Did you check with other parameters, problem sizes and/or in parallel?

Yep, everything that I can see is the same in both this demo and in my other model. Uncomment the #info(solver.parameters, verbose=True) line to see for yourself.
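
For example, to compare the two sub-parameter sets directly (a sketch; I'm assuming the sub-set names "newton_solver" and "snes_solver" as they appear in the verbose output):

# Dump the full nested parameter tree, then the two nonlinear-solver
# sub-sets for a side-by-side comparison (sub-set names assumed)
info(solver.parameters, verbose=True)
info(solver.parameters["newton_solver"], verbose=True)
info(solver.parameters["snes_solver"], verbose=True)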

...