Hello,
I am trying to solve the 1D heat equation in mixed form, applying some control through a Neumann boundary condition. The code, written for FEniCS 2016.2.0, is below.
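For reference, the system I am discretizing is, as far as I can write it down (my own summary of the forms below):

\partial_t w = \nabla \cdot \sigma + f(w), \qquad \sigma = \nabla w, \qquad x \in (0, 10),

\sigma \cdot n = g(t) \text{ at } x = 0 \text{ (the control)}, \qquad \sigma \cdot n = h \text{ at } x = 10,

with f(w) = \exp(-1/w) - \alpha w, discretized in time with implicit Euler.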
from dolfin import *

class NewtonInterface(NonlinearProblem):
    def __init__(self, a, L):
        NonlinearProblem.__init__(self)
        self.L = L
        self.a = a
    def F(self, b, x):
        assemble(self.L, tensor=b)
    def J(self, A, x):
        assemble(self.a, tensor=A)
# define left boundary
class BoundaryLeft(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 0.)

# define right boundary
class BoundaryRight(SubDomain):
    def inside(self, x, on_boundary):
        return on_boundary and near(x[0], 10.)
# Define the function that applies the POD control
def apply_control(g):
    # L and a are read at module level when the Newton problem is built
    global L, a
    u_temp = interpolate(u_0, ME)
    u.vector()[:] = u_temp.vector()[:]
    L = L_prime - dt * g * vw * ds(0)
    L = action(L, u)
    a = derivative(L, u, du)
T = 5.0
num_steps = 50
dt = T / num_steps  # time step
# Create mesh and define function space
nel = 100
I = IntervalMesh(nel, 0, 10)
V_elem = FiniteElement("Lagrange", I.ufl_cell(), 1)
W_elem = VectorElement("Lagrange", I.ufl_cell(), 1)
ME_elem = MixedElement([V_elem, W_elem])
ME = FunctionSpace(I, ME_elem)
###############################################################################
#Define the boundaries
boundaries = FacetFunction('size_t', I)
boundaryLeft = BoundaryLeft()
boundaryRight = BoundaryRight()
boundaryLeft.mark(boundaries,0)
boundaryRight.mark(boundaries,1)
ds = ds(subdomain_data=boundaries)
###############################################################################
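# (A sanity check I added while debugging the markers: in 1D each boundary
#  measure should integrate to 1.0, i.e. one endpoint each.)
print(assemble(Constant(1.0) * ds(0, domain=I)))  # left endpoint, expect 1.0
print(assemble(Constant(1.0) * ds(1, domain=I)))  # right endpoint, expect 1.0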
#Define variational problem
#Trial functions and split
du = TrialFunction(ME)
dw, dsigma = split(du)
#Test functions
vw, vsigma = TestFunctions(ME)
u = Function(ME)
w, sigma = split(u)
#Previous time step solution
u_n = Function(ME)
w_n, sigma_n = split(u_n)
# Define initial value
u_0 = Expression(('pow(x[0],2)','2*x[0]'),degree = 2)
w_0,sigma_0 = split(u_0)
w_n = interpolate(w_0,ME.sub(0).collapse())
w = interpolate(w_0,ME.sub(0).collapse())
sigma_n = interpolate(sigma_0,ME.sub(1).collapse())
sigma = interpolate(sigma_0,ME.sub(1).collapse())
#Problem conditions
alpha = 0.3
def f(u):
    return exp(-1 / u) - alpha * u
g = Expression('sin(t)',degree = 2,t=0)
h = Constant(0)
L0 = w * vw * dx - w_n * vw * dx + dt * (inner(sigma, grad(vw)) * dx - f(w) * vw * dx - h * vw * ds(1))
L1 = inner(sigma,vsigma) * dx - inner(grad(w), vsigma)*dx
L_prime = L0 + L1
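# (For reference: this is implicit Euler in time. L0 is the weak form of
#  (w - w_n)/dt = div(sigma) + f(w), with sigma.n = h imposed weakly on
#  ds(1); the control term g on ds(0) is added later in apply_control.
#  L1 enforces sigma = grad(w) weakly.)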
t = 0.0
apply_control(g)
solver = NewtonSolver()
solver.parameters["linear_solver"] = "lu"
solver.parameters["convergence_criterion"] = "incremental"
solver.parameters["relative_tolerance"] = 1e-6
test = NewtonInterface(a, L)
vtkfile = File('TPWL_test/function_test.pvd')
vtkfile << (u.split()[0], t)
for n in range(num_steps):
    # Update current time
    t += dt
    g.t = t
    u_n.vector()[:] = u.vector()
    solver.solve(test, u.vector())
    vtkfile << (u.split()[0], t)
Then I get the following error:
  line 82, in <module>
    w_n = interpolate(w_0,ME.sub(0).collapse())
  File "/usr/lib/python2.7/dist-packages/dolfin/fem/interpolation.py", line 66, in interpolate
    Pv.interpolate(v)
TypeError: in method 'Function_interpolate', argument 2 of type 'dolfin::GenericFunction const &'
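I tried to reduce the failing line to a minimal snippet (the names here are just for illustration), and it seems to raise the same TypeError:

from dolfin import *

mesh = UnitIntervalMesh(10)
V = FunctionSpace(mesh, "Lagrange", 1)
e = Expression(('x[0]', '2*x[0]'), degree=1)
c0, c1 = split(e)   # components of a vector Expression, like my w_0
interpolate(c0, V)  # fails with the same TypeError for me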
I don't know what's happening. Can anybody please help me?
Thanks