MFEM v3.4
Finite element discretization library
maxwell_solver.cpp
1 // Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
3 // reserved. See file COPYRIGHT for details.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability see http://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the GNU Lesser General Public License (as published by the Free
10 // Software Foundation) version 2.1 dated February 1999.
11 
12 #include "maxwell_solver.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 using namespace std;
17 
18 namespace mfem
19 {
20 
21 using namespace miniapps;
22 
23 namespace electromagnetics
24 {
25 
26 // Used for combining scalar coefficients
27 double prodFunc(double a, double b) { return a * b; }
28 
29 MaxwellSolver::MaxwellSolver(ParMesh & pmesh, int order,
30  double (*eps )(const Vector&),
31  double (*muInv )(const Vector&),
32  double (*sigma )(const Vector&),
33  void (*j_src )(const Vector&, double, Vector&),
34  Array<int> & abcs,
35  Array<int> & dbcs,
36  void (*dEdt_bc )(const Vector&, double, Vector&))
37  : myid_(0),
38  num_procs_(1),
39  order_(order),
40  logging_(1),
41  dtMax_(-1.0),
42  dtScale_(1.0e6),
43  pmesh_(&pmesh),
44  HCurlFESpace_(NULL),
45  HDivFESpace_(NULL),
46  hDivMassMuInv_(NULL),
47  hCurlLosses_(NULL),
48  weakCurlMuInv_(NULL),
49  Curl_(NULL),
50  e_(NULL),
51  b_(NULL),
52  j_(NULL),
53  dedt_(NULL),
54  rhs_(NULL),
55  jd_(NULL),
56  M1Losses_(NULL),
57  M2MuInv_(NULL),
58  NegCurl_(NULL),
59  WeakCurlMuInv_(NULL),
60  E_(NULL),
61  B_(NULL),
62  HD_(NULL),
63  RHS_(NULL),
64  epsCoef_(NULL),
65  muInvCoef_(NULL),
66  sigmaCoef_(NULL),
67  etaInvCoef_(NULL),
68  eCoef_(NULL),
69  bCoef_(NULL),
70  jCoef_(NULL),
71  dEdtBCCoef_(NULL),
72  eps_(eps),
73  muInv_(muInv),
74  sigma_(sigma),
75  j_src_(j_src),
76  dEdt_bc_(dEdt_bc),
77  visit_dc_(NULL)
78 {
79  // Initialize MPI variables
80  MPI_Comm_size(pmesh_->GetComm(), &num_procs_);
81  MPI_Comm_rank(pmesh_->GetComm(), &myid_);
82 
83  // Define compatible parallel finite element spaces on the parallel
84  // mesh. Here we use arbitrary order Nedelec (H(Curl)) and Raviart-Thomas
85  // (H(Div)) finite elements.
86  HCurlFESpace_ = new ND_ParFESpace(pmesh_,order_,pmesh_->Dimension());
87  HDivFESpace_ = new RT_ParFESpace(pmesh_,order_,pmesh_->Dimension());
88 
89  this->height = HCurlFESpace_->GlobalTrueVSize();
90  this->width = HDivFESpace_->GlobalTrueVSize();
91 
92  // Check for absorbing materials or boundaries
93  lossy_ = abcs.Size() > 0 || sigma_ != NULL;
94 
95  // Require implicit handling of loss terms
96  type = lossy_ ? IMPLICIT : EXPLICIT;
97 
98  // Electric permittivity
99  if ( eps_ == NULL )
100  {
101  epsCoef_ = new ConstantCoefficient(epsilon0_);
102  }
103  else
104  {
105  if ( myid_ == 0 && logging_ > 0 )
106  {
107  cout << "Creating Permittivity Coefficient" << endl;
108  }
109  epsCoef_ = new FunctionCoefficient(eps_);
110  }
111 
112  // Inverse of the magnetic permeability
113  if ( muInv_ == NULL )
114  {
115  muInvCoef_ = new ConstantCoefficient(1.0/mu0_);
116  }
117  else
118  {
119  if ( myid_ == 0 && logging_ > 0 )
120  {
121  cout << "Creating Permeability Coefficient" << endl;
122  }
123  muInvCoef_ = new FunctionCoefficient(muInv_);
124  }
125 
126  // Electric conductivity
127  if ( sigma_ != NULL )
128  {
129  if ( myid_ == 0 && logging_ > 0 )
130  {
131  cout << "Creating Conductivity Coefficient" << endl;
132  }
133  sigmaCoef_ = new FunctionCoefficient(sigma_);
134  }
135 
136  // Admittance (inverse impedance) of free space for absorbing boundaries
137  if ( abcs.Size() > 0 )
138  {
139  if ( myid_ == 0 && logging_ > 0 )
140  {
141  cout << "Creating Admittance Coefficient" << endl;
142  }
143 
144  abc_marker_.SetSize(pmesh.bdr_attributes.Max());
145  if ( abcs.Size() == 1 && abcs[0] < 0 )
146  {
147  // Mark all boundaries as absorbing
148  abc_marker_ = 1;
149  }
150  else
151  {
152  // Mark select boundaries as absorbing
153  abc_marker_ = 0;
154  for (int i=0; i<abcs.Size(); i++)
155  {
156  abc_marker_[abcs[i]-1] = 1;
157  }
158  }
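   // Note: sqrt(epsilon0_/mu0_) below is the free-space admittance
   // Y0 = 1/Z0 with Z0 ~ 376.73 Ohm; it scales the absorbing boundary
   // integrator added to hCurlLosses_ further down.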
159  etaInvCoef_ = new ConstantCoefficient(sqrt(epsilon0_/mu0_));
160  }
161 
162  // Electric Field Boundary Condition
163  if ( dbcs.Size() > 0 )
164  {
165  if ( myid_ == 0 && logging_ > 0 )
166  {
167  cout << "Configuring Dirichlet BC" << endl;
168  }
169  dbc_marker_.SetSize(pmesh.bdr_attributes.Max());
170  if ( dbcs.Size() == 1 && dbcs[0] < 0 )
171  {
172  // Mark all boundaries as Dirichlet
173  dbc_marker_ = 1;
174  }
175  else
176  {
177  // Mark select boundaries as Dirichlet
178  dbc_marker_ = 0;
179  for (int i=0; i<dbcs.Size(); i++)
180  {
181  dbc_marker_[dbcs[i]-1] = 1;
182  }
183  }
184 
185  HCurlFESpace_->GetEssentialTrueDofs(dbc_marker_, dbc_dofs_);
186 
187  if ( dEdt_bc_ != NULL )
188  {
189  dEdtBCCoef_ = new VectorFunctionCoefficient(3,dEdt_bc_);
190  }
191  else
192  {
193  Vector ebc(3); ebc = 0.0;
194  dEdtBCCoef_ = new VectorConstantCoefficient(ebc);
195  }
196  }
197 
198  // Bilinear Forms
199  if ( myid_ == 0 && logging_ > 0 )
200  {
201  cout << "Creating H(Div) Mass Operator" << endl;
202  }
203  hDivMassMuInv_ = new ParBilinearForm(HDivFESpace_);
204  hDivMassMuInv_->AddDomainIntegrator(new VectorFEMassIntegrator(*muInvCoef_));
205 
206  if ( myid_ == 0 && logging_ > 0 )
207  {
208  cout << "Creating Weak Curl Operator" << endl;
209  }
210  weakCurlMuInv_ = new ParMixedBilinearForm(HDivFESpace_,HCurlFESpace_);
211  weakCurlMuInv_->AddDomainIntegrator(
212  new MixedVectorWeakCurlIntegrator(*muInvCoef_));
213 
214  // Assemble Matrices
215  hDivMassMuInv_->Assemble();
216  weakCurlMuInv_->Assemble();
217 
218  hDivMassMuInv_->Finalize();
219  weakCurlMuInv_->Finalize();
220 
221  if ( sigmaCoef_ || etaInvCoef_ )
222  {
223  if ( myid_ == 0 && logging_ > 0 )
224  {
225  cout << "Creating H(Curl) Loss Operator" << endl;
226  }
227  hCurlLosses_ = new ParBilinearForm(HCurlFESpace_);
228  if ( sigmaCoef_ )
229  {
230  if ( myid_ == 0 && logging_ > 0 )
231  {
232  cout << "Adding domain integrator for conductive regions" << endl;
233  }
234  hCurlLosses_->AddDomainIntegrator(
235  new VectorFEMassIntegrator(*sigmaCoef_));
236  }
237  if ( etaInvCoef_ )
238  {
239  if ( myid_ == 0 && logging_ > 0 )
240  {
241  cout << "Adding boundary integrator for absorbing boundary" << endl;
242  }
243  hCurlLosses_->AddBoundaryIntegrator(
244  new VectorFEMassIntegrator(*etaInvCoef_), abc_marker_);
245  }
246  hCurlLosses_->Assemble();
247  hCurlLosses_->Finalize();
248  M1Losses_ = hCurlLosses_->ParallelAssemble();
249  }
250 
251  // Create Linear Algebra Matrices
252  M2MuInv_ = hDivMassMuInv_->ParallelAssemble();
253  WeakCurlMuInv_ = weakCurlMuInv_->ParallelAssemble();
254 
255  if ( myid_ == 0 && logging_ > 0 )
256  {
257  cout << "Creating discrete curl operator" << endl;
258  }
259  Curl_ = new ParDiscreteCurlOperator(HCurlFESpace_, HDivFESpace_);
260  Curl_->Assemble();
261  Curl_->Finalize();
262  NegCurl_ = Curl_->ParallelAssemble();
263  // Beware this modifies the matrix stored within the Curl_ object.
264  *NegCurl_ *= -1.0;
265 
266  // Build grid functions
267  e_ = new ParGridFunction(HCurlFESpace_);
268  dedt_ = new ParGridFunction(HCurlFESpace_);
269  rhs_ = new ParGridFunction(HCurlFESpace_);
270  b_ = new ParGridFunction(HDivFESpace_);
271 
272  E_ = e_->ParallelProject();
273  B_ = b_->ParallelProject();
274 
275  HD_ = new HypreParVector(HDivFESpace_);
276  RHS_ = new HypreParVector(HCurlFESpace_);
277 
278  // Initialize dedt to zero
279  *dedt_ = 0.0;
280 
281  if ( j_src_)
282  {
283  if ( myid_ == 0 && logging_ > 0 )
284  {
285  cout << "Creating Current Source" << endl;
286  }
287  jCoef_ = new VectorFunctionCoefficient(3,j_src_);
288 
289  j_ = new ParGridFunction(HCurlFESpace_);
290  j_->ProjectCoefficient(*jCoef_);
291 
292  jd_ = new ParLinearForm(HCurlFESpace_);
293  jd_->AddDomainIntegrator(new VectorFEDomainLFIntegrator(*jCoef_));
294  jd_->Assemble();
295  }
296 
297  dtMax_ = GetMaximumTimeStep();
298 }
299 
300 MaxwellSolver::~MaxwellSolver()
301 {
302  delete epsCoef_;
303  delete muInvCoef_;
304  delete etaInvCoef_;
305  delete jCoef_;
306  delete dEdtBCCoef_;
307 
308  delete E_;
309  delete B_;
310  delete HD_;
311  delete RHS_;
312 
313  delete e_;
314  delete b_;
315  delete j_;
316  delete dedt_;
317  delete rhs_;
318  delete jd_;
319 
320  delete Curl_;
321 
322  delete M1Losses_;
323  delete M2MuInv_;
324 
325  delete hDivMassMuInv_;
326  delete hCurlLosses_;
327  delete weakCurlMuInv_;
328 
329  delete HCurlFESpace_;
330  delete HDivFESpace_;
331 
332  map<int, ParBilinearForm*>::iterator mit1;
333  for (mit1=a1_.begin(); mit1!=a1_.end(); mit1++)
334  {
335  int i = mit1->first;
336  delete pcg_[i];
337  delete diagScale_[i];
338  delete A1_[i];
339  delete a1_[i];
340  }
341 
342  map<int, Coefficient*>::iterator mit2;
343  for (mit2=dtCoef_.begin(); mit2!=dtCoef_.end(); mit2++)
344  {
345  delete mit2->second;
346  }
347  for (mit2=dtSigmaCoef_.begin(); mit2!=dtSigmaCoef_.end(); mit2++)
348  {
349  delete mit2->second;
350  }
351  for (mit2=dtEtaInvCoef_.begin(); mit2!=dtEtaInvCoef_.end(); mit2++)
352  {
353  delete mit2->second;
354  }
355 
356  map<string, socketstream*>::iterator mit3;
357  for (mit3=socks_.begin(); mit3!=socks_.end(); mit3++)
358  {
359  delete mit3->second;
360  }
361 }
362 
363 HYPRE_Int
364 MaxwellSolver::GetProblemSize()
365 {
366  return HCurlFESpace_->GlobalTrueVSize();
367 }
368 
369 void
370 MaxwellSolver::PrintSizes()
371 {
372  HYPRE_Int size_nd = HCurlFESpace_->GlobalTrueVSize();
373  HYPRE_Int size_rt = HDivFESpace_->GlobalTrueVSize();
374  if ( myid_ == 0 )
375  {
376  cout << "Number of H(Curl) unknowns: " << size_nd << endl;
377  cout << "Number of H(Div) unknowns: " << size_rt << endl << flush;
378  }
379 }
380 
381 void
382 MaxwellSolver::SetInitialEField(VectorCoefficient & EFieldCoef)
383 {
384  eCoef_ = &EFieldCoef;
385  e_->ProjectCoefficient(EFieldCoef);
386  e_->ParallelProject(*E_);
387 }
388 
389 void
390 MaxwellSolver::SetInitialBField(VectorCoefficient & BFieldCoef)
391 {
392  bCoef_ = &BFieldCoef;
393  b_->ProjectCoefficient(BFieldCoef);
394  b_->ParallelProject(*B_);
395 }
396 
397 void
398 MaxwellSolver::Mult(const Vector &B, Vector &dEdt) const
399 {
400  implicitSolve(0.0, B, dEdt);
401 }
402 
403 void
404 MaxwellSolver::ImplicitSolve(double dt, const Vector &B, Vector &dEdt)
405 {
406  implicitSolve(dt, B, dEdt);
407 }
408 
409 void
410 MaxwellSolver::setupSolver(const int idt, const double dt) const
411 {
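   // Operators and solvers are cached per quantized time step: the first call
   // with a new key 'idt' assembles the ParBilinearForm, parallel matrix,
   // diagonal-scaling preconditioner, and PCG solver and stores them in the
   // a1_, A1_, diagScale_, and pcg_ maps; later calls with the same key
   // reuse them.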
412  if ( pcg_.find(idt) == pcg_.end() )
413  {
414  if ( myid_ == 0 && logging_ > 0 )
415  {
416  cout << "Creating implicit operator for dt = " << dt << endl;
417  }
418 
419  a1_[idt] = new ParBilinearForm(HCurlFESpace_);
420  a1_[idt]->AddDomainIntegrator(
421  new VectorFEMassIntegrator(epsCoef_));
422 
423  if ( idt != 0 )
424  {
425  dtCoef_[idt] = new ConstantCoefficient(0.5 * dt);
426  if ( sigmaCoef_ )
427  {
428  dtSigmaCoef_[idt] = new TransformedCoefficient(dtCoef_[idt],
429  sigmaCoef_,
430  prodFunc);
431  a1_[idt]->AddDomainIntegrator(
432  new VectorFEMassIntegrator(dtSigmaCoef_[idt]));
433  }
434  if ( etaInvCoef_ )
435  {
436  dtEtaInvCoef_[idt] = new TransformedCoefficient(dtCoef_[idt],
437  etaInvCoef_,
438  prodFunc);
439  a1_[idt]->AddBoundaryIntegrator(
440  new VectorFEMassIntegrator(dtEtaInvCoef_[idt]),
441  const_cast<Array<int>&>(abc_marker_));
442  }
443  }
444 
445  a1_[idt]->Assemble();
446  a1_[idt]->Finalize();
447  A1_[idt] = a1_[idt]->ParallelAssemble();
448 
449  diagScale_[idt] = new HypreDiagScale(*A1_[idt]);
450  pcg_[idt] = new HyprePCG(*A1_[idt]);
451  pcg_[idt]->SetTol(1.0e-12);
452  pcg_[idt]->SetMaxIter(200);
453  pcg_[idt]->SetPrintLevel(0);
454  pcg_[idt]->SetPreconditioner(*diagScale_[idt]);
455  }
456 }
457 
458 void
459 MaxwellSolver::implicitSolve(double dt, const Vector &B, Vector &dEdt) const
460 {
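   // Solve for dE/dt at the current time:
   //    (M_eps + (dt/2) M_loss) dEdt = C^T M_{1/mu} B - M_loss E - J,
   // where M_loss collects the conductivity and absorbing-boundary mass terms
   // (present only for lossy problems) and J is the assembled current source.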
461  int idt = hCurlLosses_ ? ((int)(dtScale_ * dt / dtMax_)) : 0;
462 
463  b_->Distribute(B);
464  weakCurlMuInv_->Mult(*b_, *rhs_);
465 
466  if ( hCurlLosses_ )
467  {
468  e_->Distribute(*E_);
469  hCurlLosses_->AddMult(*e_, *rhs_, -1.0);
470  }
471 
472  if ( jd_ )
473  {
474  jCoef_->SetTime(t); // 't' is member data from mfem::TimeDependentOperator
475  jd_->Assemble();
476  *rhs_ -= *jd_;
477  }
478 
479  if ( dEdtBCCoef_ )
480  {
481  dEdtBCCoef_->SetTime(t);
482  dedt_->ProjectBdrCoefficientTangent(*dEdtBCCoef_,
483  const_cast<Array<int>&>(dbc_marker_));
484  }
485 
486  // Create objects and matrices for solving with the given time step
487  setupSolver(idt, dt);
488 
489  // Apply essential BCs and determine true DoFs for the right hand side
490  a1_[idt]->FormLinearSystem(dbc_dofs_, *dedt_, *rhs_, *A1_[idt], dEdt, *RHS_);
491 
492  // Solve for the time derivative of the electric field (true DoFs)
493  pcg_[idt]->Mult(*RHS_, dEdt);
494 
495  // Distribute shared DoFs to relevant processors
496  a1_[idt]->RecoverFEMSolution(dEdt, *rhs_, *dedt_);
497 }
498 
499 void
500 MaxwellSolver::SyncGridFuncs()
501 {
502  e_->Distribute(*E_);
503  b_->Distribute(*B_);
504 }
505 
506 double
507 MaxwellSolver::GetMaximumTimeStep() const
508 {
509  if ( dtMax_ > 0.0 )
510  {
511  return dtMax_;
512  }
513 
514  HypreParVector * v0 = new HypreParVector(HCurlFESpace_);
515  HypreParVector * v1 = new HypreParVector(HCurlFESpace_);
516  HypreParVector * u0 = new HypreParVector(HDivFESpace_);
517 
518  v0->Randomize(1234);
519 
520  int iter = 0, nstep = 20;
521  double dt0 = 1.0, dt1 = 1.0, change = 1.0, ptol = 0.001;
522 
523  // Create Solver assuming no loss operators
524  setupSolver(0, 0.0);
525 
526  // Use power method to approximate the largest eigenvalue of the update
527  // operator.
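   // Each iterate applies M_eps^{-1} Curl^T M_{1/mu} Curl (via pcg_[0] and the
   // assembled matrices), so lambda tends to its largest eigenvalue,
   // omega_max^2.  A leapfrog-type explicit update of the lossless system is
   // stable for dt up to about 2/omega_max, i.e. the 2/sqrt(lambda) estimate
   // computed below.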
528  while ( iter < nstep && change > ptol )
529  {
530  double normV0 = InnerProduct(*v0,*v0);
531  *v0 /= sqrt(normV0);
532 
533  NegCurl_->Mult(*v0,*u0);
534  M2MuInv_->Mult(*u0,*HD_);
535  NegCurl_->MultTranspose(*HD_,*RHS_);
536 
537  pcg_[0]->Mult(*RHS_,*v1);
538 
539  double lambda = InnerProduct(*v0,*v1);
540  dt1 = 2.0/sqrt(lambda);
541  change = fabs((dt1-dt0)/dt0);
542  dt0 = dt1;
543 
544  if ( myid_ == 0 && logging_ > 1 )
545  {
546  cout << iter << ": " << dt0 << " " << change << endl;
547  }
548 
549  std::swap(v0, v1);
550 
551  iter++;
552  }
553 
554  delete v0;
555  delete v1;
556  delete u0;
557 
558  return dt0;
559 }
560 
561 double
562 MaxwellSolver::GetEnergy() const
563 {
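   // Discrete field energy 0.5 * (E^T M_eps E + B^T M_{1/mu} B), the finite
   // element analogue of 0.5 * integral( eps |E|^2 + |B|^2 / mu ).  A1_[0] is
   // the eps mass matrix assembled by setupSolver(0, 0.0).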
564  double energy = 0.0;
565 
566  A1_[0]->Mult(*E_,*RHS_);
567  M2MuInv_->Mult(*B_,*HD_);
568 
569  energy = InnerProduct(*E_,*RHS_) + InnerProduct(*B_,*HD_);
570 
571  return 0.5 * energy;
572 }
573 
574 void
575 MaxwellSolver::RegisterVisItFields(VisItDataCollection & visit_dc)
576 {
577  visit_dc_ = &visit_dc;
578 
579  visit_dc.RegisterField("E", e_);
580  visit_dc.RegisterField("B", b_);
581  if ( j_ )
582  {
583  visit_dc.RegisterField("J", j_);
584  }
585 }
586 
587 void
588 MaxwellSolver::WriteVisItFields(int it)
589 {
590  if ( visit_dc_ )
591  {
592  if ( myid_ == 0 && logging_ > 1 )
593  { cout << "Writing VisIt files ..." << flush; }
594 
595  if ( j_ )
596  {
597  jCoef_->SetTime(t);
598  j_->ProjectCoefficient(*jCoef_);
599  }
600 
601  visit_dc_->SetCycle(it);
602  visit_dc_->SetTime(t);
603  visit_dc_->Save();
604 
605  if ( myid_ == 0 && logging_ > 1 ) { cout << " " << endl << flush; }
606  }
607 }
608 
609 void
610 MaxwellSolver::InitializeGLVis()
611 {
612  if ( myid_ == 0 && logging_ > 0 )
613  { cout << "Opening GLVis sockets." << endl << flush; }
614 
615  socks_["E"] = new socketstream;
616  socks_["E"]->precision(8);
617 
618  socks_["B"] = new socketstream;
619  socks_["B"]->precision(8);
620 
621  if ( j_ )
622  {
623  socks_["J"] = new socketstream;
624  socks_["J"]->precision(8);
625  }
626 
627  if ( myid_ == 0 && logging_ > 0 )
628  { cout << "GLVis sockets open." << endl << flush; }
629 }
630 
631 void
632 MaxwellSolver::DisplayToGLVis()
633 {
634  if ( myid_ == 0 && logging_ > 1 )
635  { cout << "Sending data to GLVis ..." << flush; }
636 
637  char vishost[] = "localhost";
638  int visport = 19916;
639 
640  int Wx = 0, Wy = 0; // window position
641  int Ww = 350, Wh = 350; // window size
642  int offx = Ww+10, offy = Wh+45; // window offsets
643 
644  VisualizeField(*socks_["E"], vishost, visport,
645  *e_, "Electric Field (E)", Wx, Wy, Ww, Wh);
646  Wx += offx;
647 
648  VisualizeField(*socks_["B"], vishost, visport,
649  *b_, "Magnetic Flux Density (B)", Wx, Wy, Ww, Wh);
650 
651  if ( j_ )
652  {
653  Wx = 0;
654  Wy += offy;
655 
656  jCoef_->SetTime(t); // 't' is member data from mfem::TimeDependentOperator
657  j_->ProjectCoefficient(*jCoef_);
658 
659  VisualizeField(*socks_["J"], vishost, visport,
660  *j_, "Current Density (J)", Wx, Wy, Ww, Wh);
661  }
662  if ( myid_ == 0 && logging_ > 1 ) { cout << " " << flush; }
663 }
664 
665 } // namespace electromagnetics
666 
667 } // namespace mfem
668 
669 #endif // MFEM_USE_MPI
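A minimal driver sketch (not part of this file) showing how the public interface above is typically wired together. The mesh file, polynomial order, boundary-attribute lists, and zero initial-field functions are placeholders chosen for illustration; NULL is passed for the optional material, source, and boundary callbacks; and the time-integration loop (which the companion maxwell.cpp miniapp drives with an ODE solver) is elided. An MPI build of MFEM and access to maxwell_solver.hpp are assumed.

#include "mfem.hpp"
#include "maxwell_solver.hpp"

using namespace mfem;
using namespace mfem::electromagnetics;

// Placeholder initial fields: start from E = 0 and B = 0.
static void EFieldFunc(const Vector &, Vector &E) { E = 0.0; }
static void BFieldFunc(const Vector &, Vector &B) { B = 0.0; }

int main(int argc, char *argv[])
{
   MPI_Init(&argc, &argv);
   {
      // Read a serial mesh (placeholder file) and distribute it.
      Mesh *mesh = new Mesh("../../data/fichera.mesh", 1, 1);
      ParMesh pmesh(MPI_COMM_WORLD, *mesh);
      delete mesh;

      int order = 2;
      Array<int> abcs(1); abcs[0] = -1;  // absorbing BC on all boundaries
      Array<int> dbcs;                   // no Dirichlet boundaries

      // NULL coefficients select the built-in vacuum eps/mu and disable the
      // conductivity, current source, and dE/dt boundary callbacks.
      MaxwellSolver maxwell(pmesh, order, NULL, NULL, NULL, NULL,
                            abcs, dbcs, NULL);
      maxwell.PrintSizes();

      VectorFunctionCoefficient ECoef(3, EFieldFunc);
      VectorFunctionCoefficient BCoef(3, BFieldFunc);
      maxwell.SetInitialEField(ECoef);
      maxwell.SetInitialBField(BCoef);

      double dt = maxwell.GetMaximumTimeStep();  // stable-step estimate
      // ... advance (E, B) with an ODE solver using dt, then call
      //     maxwell.SyncGridFuncs() before any output ...

      VisItDataCollection visit_dc("Maxwell-Sketch", &pmesh);
      maxwell.RegisterVisItFields(visit_dc);
      maxwell.WriteVisItFields(0);   // write the (initial) fields
   }
   MPI_Finalize();
   return 0;
}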