MFEM v4.5.1
Finite element discretization library
admfem.hpp
1 // Copyright (c) 2010-2022, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #ifndef ADMFEM_HPP
13 #define ADMFEM_HPP
14 
15 #include "mfem.hpp"
16 #include "../../linalg/dual.hpp"
17 #include "tadvector.hpp"
18 #include "taddensemat.hpp"
19 
20 #ifdef MFEM_USE_CODIPACK
21 #include <codi.hpp>
22 
23 namespace mfem
24 {
25 
26 namespace ad
27 {
28 #ifdef MFEM_USE_ADFORWARD
29 /// Forward AD type declaration
30 typedef codi::RealForward ADFloatType;
31 /// Vector type for AD-numbers
32 typedef TAutoDiffVector<ADFloatType> ADVectorType;
33 /// Matrix type for AD-numbers
34 typedef TAutoDiffDenseMatrix<ADFloatType> ADMatrixType;
35 #else
36 /// Reverse AD type declaration
37 typedef codi::RealReverse ADFloatType;
38 /// Vector type for AD-numbers
39 typedef TAutoDiffVector<ADFloatType> ADVectorType;
40 /// Matrix type for AD-numbers
41 typedef TAutoDiffDenseMatrix<ADFloatType> ADMatrixType;
42 #endif
43 }
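The types above are what user code sees: the same residual or energy body can be written once against ad::ADVectorType and evaluated either with the CoDiPack forward/reverse numbers or, in the native branch further below, with dual numbers. A minimal sketch (the function name ExampleResidual and the algebra are invented for illustration; only the signature is prescribed by VectorFuncAutoDiff below):

// Hypothetical two-state, one-parameter residual: rr = {p*u0*u0 + u1, u0*u1}.
// The signature matches the std::function expected by VectorFuncAutoDiff.
void ExampleResidual(mfem::Vector &vparam, mfem::ad::ADVectorType &uu,
                     mfem::ad::ADVectorType &rr)
{
   double p = vparam[0];            // passive (non-differentiated) parameter
   rr[0] = p*uu[0]*uu[0] + uu[1];
   rr[1] = uu[0]*uu[1];
}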
44 
45 /// The class provides an evaluation of the Jacobian of a templated vector
46 /// function provided in the constructor. The Jacobian is evaluated with the
47 /// help of automatic differentiation (AD). The template parameters specify the
48 /// size of the return vector (vector_size), the size of the input vector
49 /// (state_size), and the size of the parameters supplied to the function.
50 template<int vector_size=1, int state_size=1, int param_size=0>
51 class VectorFuncAutoDiff
52 {
53 public:
54  /// F_ is a user-implemented function to be differentiated by
55  /// VectorFuncAutoDiff. The signature of the function is: F_(mfem::Vector&
56  /// parameters, ad::ADVectorType& state_vector, ad::ADVectorType& result).
57  /// The parameters vector should have size param_size. The state_vector
58  /// should have size state_size, and the result vector should have size
59  /// vector_size. All size parameters are template parameters in
60  /// VectorFuncAutoDiff.
61  VectorFuncAutoDiff(
62  std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F_)
63  {
64  F=F_;
65  }
66 
67  /// Evaluates the Jacobian of the vector function F_ for a set of parameters
68  /// (vparam) and state vector vstate. The Jacobian (jac) has dimensions
69  /// [vector_size x state_size].
70  void Jacobian(mfem::Vector &vparam, mfem::Vector &vstate,
71  mfem::DenseMatrix &jac)
72  {
73 #ifdef MFEM_USE_ADFORWARD
74  // use forward mode
75  jac.SetSize(vector_size, state_size);
76  jac = 0.0;
77  {
78  ad::ADVectorType ad_state(state_size);
79  ad::ADVectorType ad_result(vector_size);
80  for (int i=0; i<state_size; i++)
81  {
82  ad_state[i].setValue(vstate[i]);
83  ad_state[i].setGradient(0.0);
84  }
85  for (int ii=0; ii<state_size; ii++)
86  {
87  ad_state[ii].setGradient(1.0);
88  F(vparam,ad_state,ad_result);
89  for (int jj=0; jj<vector_size; jj++)
90  {
91  jac(jj,ii)=ad_result[jj].getGradient();
92  }
93  ad_state[ii].setGradient(0.0);
94  }
95  }
96 #else // use reverse mode
97  jac.SetSize(vector_size, state_size);
98  jac = 0.0;
99  {
100  ad::ADVectorType ad_state(state_size);
101  ad::ADVectorType ad_result(vector_size);
102  for (int i=0; i<state_size; i++)
103  {
104  ad_state[i]=vstate[i];
105  }
106 
107  ad::ADFloatType::TapeType& tape =ad::ADFloatType::getGlobalTape();
108  typename ad::ADFloatType::TapeType::Position pos=tape.getPosition();
109 
110  tape.setActive();
111  for (int ii=0; ii<state_size; ii++) { tape.registerInput(ad_state[ii]); }
112  F(vparam,ad_state,ad_result);
113  for (int ii=0; ii<vector_size; ii++) { tape.registerOutput(ad_result[ii]); }
114  tape.setPassive();
115 
116  for (int jj=0; jj<vector_size; jj++)
117  {
118  ad_result[jj].setGradient(1.0);
119  tape.evaluate();
120  for (int ii=0; ii<state_size; ii++)
121  {
122  jac(jj,ii)=ad_state[ii].getGradient();
123  }
124  tape.clearAdjoints();
125  ad_result[jj].setGradient(0.0);
126  }
127  tape.reset(pos);
128  }
129 #endif
130  }
131 private:
132  std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F;
133 }; // VectorFuncAutoDiff
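A minimal usage sketch for this class (the sizes, parameter values, and the lambda are illustrative; it assumes mfem.hpp and this header are already included). The same code compiles against the native fallback class further below, since the two versions share the interface:

void ExampleVectorFuncAutoDiff()
{
   // Residual rr = {p*u0*u0 + u1, u0*u1}: vector_size=2, state_size=2, param_size=1.
   mfem::VectorFuncAutoDiff<2,2,1> adf(
      [](mfem::Vector &vparam, mfem::ad::ADVectorType &uu,
         mfem::ad::ADVectorType &rr)
      {
         rr[0] = vparam[0]*uu[0]*uu[0] + uu[1];
         rr[1] = uu[0]*uu[1];
      });

   mfem::Vector param(1); param[0] = 3.0;
   mfem::Vector state(2); state[0] = 1.0; state[1] = 2.0;
   mfem::DenseMatrix jac;
   adf.Jacobian(param, state, jac);
   // Expected result: jac = [2*p*u0, 1; u1, u0] = [6, 1; 2, 1].
}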
134 
135 /// The class provides an evaluation of the Jacobian of a templated vector
136 /// function provided as a functor TFunctor. The Jacobian is evaluated with the
137 /// help of automatic differentiation (AD). The template parameters specify the
138 /// size of the return vector (vector_size), the size of the input vector
139 /// (state_size), and the size of the parameters supplied to the function. The
140 /// TFunctor functor is a template class with parameters [Float data type],
141 /// [Vector type for the additional parameters], [Vector type for the state
142 /// vector and the return residual]. The integer template parameters are the
143 /// same ones passed to QVectorFuncAutoDiff.
144 template<template<typename, typename, typename, int, int, int> class TFunctor
145  , int vector_size=1, int state_size=1, int param_size=0>
146 class QVectorFuncAutoDiff
147 {
148 public:
149  /// Evaluates the vector function for given set of parameters and state
150  /// values in vector uu. The result is returned in vector rr.
151  void VectorFunc(const mfem::Vector &vparam, mfem::Vector &uu, mfem::Vector& rr)
152  {
153  rf(vparam,uu,rr);
154  }
155 
156  /// Returns the gradient of TFunctor(...) in the dense matrix jac. The
157  /// dimensions of jac are vector_size x state_size, where state_size is the
158  /// length of vector uu.
159  void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
160  {
161 #ifdef MFEM_USE_ADFORWARD
162  // use forward mode
163  jac.SetSize(vector_size, state_size);
164  jac = 0.0;
165  {
166  ad::ADVectorType aduu(state_size);
167  ad::ADVectorType rr(vector_size);
168  for (int i=0; i<state_size; i++)
169  {
170  aduu[i].setValue(uu[i]);
171  aduu[i].setGradient(0.0);
172  }
173 
174  for (int ii=0; ii<state_size; ii++)
175  {
176  aduu[ii].setGradient(1.0);
177  tf(vparam,aduu,rr);
178  for (int jj=0; jj<vector_size; jj++)
179  {
180  jac(jj,ii)=rr[jj].getGradient();
181  }
182  aduu[ii].setGradient(0.0);
183  }
184  }
185 #else // end MFEM_USE_ADFORWARD
186  // use reverse mode
187  jac.SetSize(vector_size, state_size);
188  jac = 0.0;
189  {
190  ad::ADVectorType aduu(state_size);
191  ad::ADVectorType rr(vector_size);
192  for (int i=0; i<state_size; i++)
193  {
194  aduu[i]=uu[i];
195  }
196 
197  ad::ADFloatType::TapeType& tape =ad::ADFloatType::getGlobalTape();
198  typename ad::ADFloatType::TapeType::Position pos=tape.getPosition();
199 
200  tape.setActive();
201  for (int ii=0; ii<state_size; ii++) { tape.registerInput(aduu[ii]); }
202  tf(vparam,aduu,rr);
203  for (int ii=0; ii<vector_size; ii++) { tape.registerOutput(rr[ii]); }
204  tape.setPassive();
205  for (int jj=0; jj<vector_size; jj++)
206  {
207  rr[jj].setGradient(1.0);
208  tape.evaluate();
209  for (int ii=0; ii<state_size; ii++)
210  {
211  jac(jj,ii)=aduu[ii].getGradient();
212  }
213  tape.clearAdjoints();
214  rr[jj].setGradient(0.0);
215  }
216  tape.reset(pos);
217  }
218 #endif
219  }
220 private:
221 
222 
223  TFunctor<ad::ADFloatType, const Vector, ad::ADVectorType,
224  vector_size, state_size, param_size> tf;
225 
226  TFunctor<double,const mfem::Vector, mfem::Vector,
227  vector_size, state_size, param_size> rf;
228 
229 };
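A minimal usage sketch (the functor name ExampleResidualFunctor and the algebra are invented). The same functor body is instantiated internally with plain doubles for VectorFunc and with AD types for Jacobian:

// Hypothetical residual functor: rr = {p*u0*u0 + u1, u0*u1}.
template<typename TDataType, typename TParamVector, typename TStateVector,
         int residual_size, int state_size, int param_size>
class ExampleResidualFunctor
{
public:
   void operator()(TParamVector &vparam, TStateVector &uu, TStateVector &rr)
   {
      rr[0] = vparam[0]*uu[0]*uu[0] + uu[1];
      rr[1] = uu[0]*uu[1];
   }
};

void ExampleQVectorFuncAutoDiff()
{
   mfem::QVectorFuncAutoDiff<ExampleResidualFunctor,2,2,1> qdf;
   mfem::Vector param(1); param[0] = 3.0;
   mfem::Vector state(2); state[0] = 1.0; state[1] = 2.0;
   mfem::Vector res(2);
   mfem::DenseMatrix jac;
   qdf.VectorFunc(param, state, res);  // res = {3*1*1 + 2, 1*2} = {5, 2}
   qdf.Jacobian(param, state, jac);    // jac = [6, 1; 2, 1], as in the earlier sketch
}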
230 
231 /// The class provides an evaluation of the first derivatives and the Hessian of
232 /// a templated scalar function provided as a functor TFunctor. Both the first
233 /// and the second derivatives are evaluated with the help of automatic
234 /// differentiation (AD). The template parameters specify the size of the input
235 /// vector (state_size) and the size of the parameters supplied to the
236 /// function. The TFunctor functor is a template class with parameters [Float
237 /// data type], [Vector type for the additional parameters], [Vector type for
238 /// the state vector and the return residual]. The integer template parameters
239 /// are the same ones passed to QFunctionAutoDiff.
240 template<template<typename, typename, typename, int, int> class TFunctor
241  , int state_size=1, int param_size=0>
242 class QFunctionAutoDiff
243 {
244 public:
245 
246  /// Evaluates a function for arguments vparam and uu. The evaluation is
247  /// based on the operator() in the user provided functor TFunctor.
248  double Eval(const mfem::Vector &vparam, mfem::Vector &uu)
249  {
250  return rf(vparam,uu);
251  }
252 
253  /// Provides the same functionality as Grad.
254  void VectorFunc(const mfem::Vector &vparam, mfem::Vector &uu, mfem::Vector &rr)
255  {
256  Grad(vparam,uu,rr);
257  }
258 
259  /// Returns the first derivative of TFunctor(...) with respect to the active
260  /// arguments provided in vector uu. The length of rr is the same as for uu.
261  void Grad(const mfem::Vector &vparam, mfem::Vector &uu, mfem::Vector &rr)
262  {
263 
264 #ifdef MFEM_USE_ADFORWARD
265  // use forward mode
266  rr.SetSize(state_size);
267  {
268  ad::ADVectorType aduu(state_size);
269  for (int i=0; i<state_size; i++)
270  {
271  aduu[i].setValue(uu[i]);
272  aduu[i].setGradient(0.0);
273  }
274 
275  ad::ADFloatType rez;
276 
277  for (int ii=0; ii<state_size; ii++)
278  {
279  aduu[ii].setGradient(1.0);
280  rez=tf(vparam,aduu);
281  rr[ii]=rez.getGradient();
282  aduu[ii].setGradient(0.0);
283  }
284  }
285 #else
286  {
287  ad::ADVectorType aduu(state_size);
288  ad::ADFloatType rez;
289  for (int i=0; i<state_size; i++)
290  {
291  aduu[i]=uu[i];
292  }
293 
294  ad::ADFloatType::TapeType& tape =ad::ADFloatType::getGlobalTape();
295  typename ad::ADFloatType::TapeType::Position pos=tape.getPosition();
296 
297  tape.setActive();
298  for (int ii=0; ii<state_size; ii++) { tape.registerInput(aduu[ii]); }
299 
300  rez=tf(vparam,aduu);
301  tape.registerOutput(rez);
302  tape.setPassive();
303 
304  rez.setGradient(1.0);
305  tape.evaluate();
306  for (int i=0; i<state_size; i++)
307  {
308  rr[i]=aduu[i].getGradient();
309  }
310  tape.reset(pos);
311  }
312 #endif
313  }
314 
315  /// Provides the same functionality as Hessian.
316  void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
317  {
318  Hessian(vparam,uu,jac);
319  }
320 
321 #ifdef MFEM_USE_ADFORWARD
322  // use forward-forward mode
323  typedef codi::RealForwardGen<double> ADFType;
324  typedef TAutoDiffVector<ADFType> ADFVector;
325  typedef TAutoDiffDenseMatrix<ADFType> ADFDenseMatrix;
326 
327  typedef codi::RealForwardGen<ADFType> ADSType;
328  typedef TAutoDiffVector<ADSType> ADSVector;
329  typedef TAutoDiffDenseMatrix<ADSType> ADSDenseMatrix;
330 #else
331  // use mixed forward and reverse mode
332  typedef codi::RealForwardGen<double> ADFType;
333  typedef TAutoDiffVector<ADFType> ADFVector;
334  typedef TAutoDiffDenseMatrix<ADFType> ADFDenseMatrix;
335 
336  typedef codi::RealReverseGen<ADFType> ADSType;
337  typedef TAutoDiffVector<ADSType> ADSVector;
338  typedef TAutoDiffDenseMatrix<ADSType> ADSDenseMatrix;
339 #endif
340 
341 
342  /// Returns the Hessian of TFunctor(...) in the dense matrix jac. The
343  /// dimensions of jac are state_size x state_size, where state_size is the
344  /// length of vector uu.
345  void Hessian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
346  {
347 #ifdef MFEM_USE_ADFORWARD
348  // use forward-forward mode
349  jac.SetSize(state_size);
350  jac=0.0;
351  {
352  ADSVector aduu(state_size);
353  for (int ii = 0; ii < state_size; ii++)
354  {
355  aduu[ii].value().value()=uu[ii];
356  aduu[ii].value().gradient()=0.0;
357  aduu[ii].gradient().value()=0.0;
358  aduu[ii].gradient().gradient()=0.0;
359  }
360 
361  for (int ii = 0; ii < state_size; ii++)
362  {
363  aduu[ii].value().gradient()=1.0;
364  for (int jj=0; jj<(ii+1); jj++)
365  {
366  aduu[jj].gradient().value()=1.0;
367  ADSType rez=sf(vparam,aduu);
368  jac(ii,jj)=rez.gradient().gradient();
369  jac(jj,ii)=jac(ii,jj);
370  aduu[jj].gradient().value()=0.0;
371  }
372  aduu[ii].value().gradient()=0.0;
373  }
374  }
375 #else
376  // use mixed forward and reverse mode
377  jac.SetSize(state_size);
378  jac=0.0;
379  {
380  ADSVector aduu(state_size);
381  for (int ii=0; ii < state_size ; ii++)
382  {
383  aduu[ii].value().value()=uu[ii];
384  }
385 
386  ADSType rez;
387 
388  ADSType::TapeType& tape = ADSType::getGlobalTape();
389  typename ADSType::TapeType::Position pos;
390  for (int ii = 0; ii < state_size ; ii++)
391  {
392  pos=tape.getPosition();
393  tape.setActive();
394 
395  for (int jj=0; jj < state_size; jj++)
396  {
397  if (jj==ii) {aduu[jj].value().gradient()=1.0;}
398  else {aduu[jj].value().gradient()=0.0;}
399  tape.registerInput(aduu[jj]);
400  }
401 
402  rez=sf(vparam,aduu);
403  tape.registerOutput(rez);
404  tape.setPassive();
405 
406  rez.gradient().value()=1.0;
407  tape.evaluate();
408 
409  for (int jj=0; jj<(ii+1); jj++)
410  {
411  jac(ii,jj)=aduu[jj].gradient().gradient();
412  jac(jj,ii)=jac(ii,jj);
413  }
414  tape.reset(pos);
415  }
416 
417  }
418 #endif
419  }
420 private:
421  TFunctor<double, const mfem::Vector,
422  mfem::Vector, state_size, param_size> rf;
423 
424  TFunctor<ad::ADFloatType, const mfem::Vector,
425  ad::ADVectorType, state_size, param_size> tf;
426 
427  TFunctor<ADSType, const mfem::Vector, ADSVector,
428  state_size, param_size> sf;
429 };
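A minimal usage sketch (the functor name ExampleEnergyFunctor, the quadratic energy, and the numbers are invented): for w = 0.5*k*u0^2 + u0*u1 the gradient is {k*u0 + u1, u0} and the Hessian is the constant matrix [k, 1; 1, 0], which Grad and Hessian should reproduce:

// Hypothetical scalar energy functor: w = 0.5*k*u0^2 + u0*u1, with k = vparam[0].
template<typename TDataType, typename TParamVector, typename TStateVector,
         int state_size, int param_size>
class ExampleEnergyFunctor
{
public:
   TDataType operator()(TParamVector &vparam, TStateVector &uu)
   {
      return 0.5*vparam[0]*uu[0]*uu[0] + uu[0]*uu[1];
   }
};

void ExampleQFunctionAutoDiff()
{
   mfem::QFunctionAutoDiff<ExampleEnergyFunctor,2,1> qf;
   mfem::Vector param(1); param[0] = 4.0;
   mfem::Vector state(2); state[0] = 1.0; state[1] = 2.0;
   double w = qf.Eval(param, state);   // w = 0.5*4*1 + 1*2 = 4
   mfem::Vector grad(2);
   qf.Grad(param, state, grad);        // grad = {k*u0 + u1, u0} = {6, 1}
   mfem::DenseMatrix hess;
   qf.Hessian(param, state, hess);     // hess = [k, 1; 1, 0] = [4, 1; 1, 0]
}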
430 
431 }
432 #else // end MFEM_USE_CODIPACK
433 
434 // USE NATIVE IMPLEMENTATION
435 namespace mfem
436 {
437 
438 namespace ad
439 {
440 /// MFEM native forward AD-type
441 typedef internal::dual<double, double> ADFloatType;
442 /// Vector type for AD-type numbers
443 typedef TAutoDiffVector<ADFloatType> ADVectorType;
444 /// Matrix type for AD-type numbers
445 typedef TAutoDiffDenseMatrix<ADFloatType> ADMatrixType;
446 }
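A small sketch of the dual-number arithmetic behind this native forward mode (the variable names and values are only illustrative): seeding the gradient part of an input with 1 and evaluating an expression leaves the derivative with respect to that input in the gradient part of the result.

void ExampleDualNumber()
{
   mfem::ad::ADFloatType x{2.0, 1.0};       // value = 2, derivative seed dx/dx = 1
   mfem::ad::ADFloatType y = x*x + 3.0*x;   // y.value = 10
   // y.gradient = 2*x + 3 = 7, the derivative of x*x + 3*x at x = 2.
}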
447 
448 /// The class provides an evaluation of the Jacobian of a templated vector
449 /// function provided in the constructor. The Jacobian is evaluated with the
450 /// help of automatic differentiation (AD). The template parameters specify the
451 /// size of the return vector (vector_size), the size of the input vector
452 /// (state_size), and the size of the parameters supplied to the function.
453 template<int vector_size=1, int state_size=1, int param_size=0>
454 class VectorFuncAutoDiff
455 {
456 public:
457  /// F_ is a user-implemented function to be differentiated by
458  /// VectorFuncAutoDiff. The signature of the function is: F_(mfem::Vector&
459  /// parameters, ad::ADVectorType& state_vector, ad::ADVectorType& result).
460  /// The parameters vector should have size param_size. The state_vector
461  /// should have size state_size, and the result vector should have size
462  /// vector_size. All size parameters are template parameters in
463  /// VectorFuncAutoDiff.
464  VectorFuncAutoDiff(
465  std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F_)
466  {
467  F=F_;
468  }
469 
470  /// Evaluates the Jacobian of the vector function F_ for a set of parameters
471  /// (vparam) and state vector uu. The Jacobian (jac) has dimensions
472  /// [vector_size x state_size].
473  void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
474  {
475  jac.SetSize(vector_size, state_size);
476  jac = 0.0;
477  {
478  ad::ADVectorType aduu(uu); // values copied from uu; gradients start at zero
479  ad::ADVectorType rr(vector_size);
480 
481  for (int ii = 0; ii < state_size; ii++)
482  {
483  aduu[ii].gradient = 1.0;
484  F(vparam,aduu,rr);
485  for (int jj = 0; jj < vector_size; jj++)
486  {
487  jac(jj, ii) = rr[jj].gradient;
488  }
489  aduu[ii].gradient = 0.0;
490  }
491  }
492  }
493 
494 private:
495  std::function<void(mfem::Vector&, ad::ADVectorType&, ad::ADVectorType&)> F;
496 
497 };
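A minimal verification sketch (the residual, step size, and names are invented, and the residual repeats the one from the earlier sketch): the AD Jacobian entry for one state component can be compared against a central finite difference, which is a common sanity check when wiring up a new residual:

void ExampleJacobianCheck()
{
   mfem::VectorFuncAutoDiff<2,2,1> adf(
      [](mfem::Vector &vparam, mfem::ad::ADVectorType &uu,
         mfem::ad::ADVectorType &rr)
      {
         rr[0] = vparam[0]*uu[0]*uu[0] + uu[1];
         rr[1] = uu[0]*uu[1];
      });

   mfem::Vector param(1); param[0] = 3.0;
   mfem::Vector state(2); state[0] = 1.0; state[1] = 2.0;
   mfem::DenseMatrix jac;
   adf.Jacobian(param, state, jac);

   // Central finite difference of the first residual component in state[0].
   const double h = 1e-6;
   auto r0 = [&](double u0) { return param[0]*u0*u0 + state[1]; };
   double fd = (r0(state[0]+h) - r0(state[0]-h))/(2.0*h);
   // fd should match jac(0,0) = 2*p*u0 = 6 up to rounding error.
}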
498 
499 /// The class provides an evaluation of the Jacobian of a templated vector
500 /// function provided as a functor TFunctor. The Jacobian is evaluated with the
501 /// help of automatic differentiation (AD). The template parameters specify the
502 /// size of the return vector (vector_size), the size of the input vector
503 /// (state_size), and the size of the parameters supplied to the function. The
504 /// TFunctor functor is a template class with parameters [Float data type],
505 /// [Vector type for the additional parameters], [Vector type for the state
506 /// vector and the return residual].
507 /// The integer template parameters are the same ones
508 /// passed to QVectorFuncAutoDiff. \n
509 /// Example: f={sin(a*x*y), cos(b*x*y*z), x*x+y*x} \n
510 /// The vector function has vector_size=3, and state_size=3, i.e., it has
511 /// three arguments [x,y,z]. The parameters [a,b] size is 2.
512 /// The functor class will have the following form
513 /// \code{.cpp}
514 /// template<typename TDataType, typename TParamVector, typename TStateVector,
515 /// int residual_size, int state_size, int param_size>
516 /// class MyVectorFunction{
517 /// public:
518 /// void operator() (TParamVector& vparam, TStateVector& uu, TStateVector& rr)
519 /// {
520 /// auto a=vparam[0];
521 /// auto b=vparam[1];
522 /// rr[0]=sin(a*uu[0]*uu[1]);
523 /// rr[1]=cos(b*uu[0]*uu[1]*uu[2]);
524 /// rr[2]=uu[0]*uu[0]+uu[0]*uu[1];
525 /// }
526 ///
527 /// };
528 /// \endcode
529 template<template<typename, typename, typename, int, int, int> class TFunctor
530  , int vector_size=1, int state_size=1, int param_size=0>
531 class QVectorFuncAutoDiff
532 {
533 private:
534  /// MFEM native forward AD-type
535  typedef internal::dual<double, double> ADFType;
536  /// Vector type for AD-type numbers
537  typedef TAutoDiffVector<ADFType> ADFVector;
538  /// Matrix type for AD-type numbers
539  typedef TAutoDiffDenseMatrix<ADFType> ADFDenseMatrix;
540 
541 public:
542  /// Returns a vector valued function rr for supplied passive arguments
543  /// vparam and active arguments uu. The evaluation is based on the user
544  /// supplied TFunctor template class.
545  void VectorFunc(const Vector &vparam, Vector &uu, Vector &rr)
546  {
547  func(vparam, uu, rr);
548  }
549 
550  /// Returns the gradient of TFunctor(...) residual in the dense matrix jac.
551  /// The dimensions of jac are vector_size x state_size, where state_size is
552  /// the length of vector uu.
553  void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
554  {
555  // use native AD package
556  jac.SetSize(vector_size, state_size);
557  jac = 0.0;
558  {
559  ADFVector aduu(uu); // values copied from uu; gradients start at zero
560  ADFVector rr(vector_size);
561 
562  for (int ii = 0; ii < state_size; ii++)
563  {
564  aduu[ii].gradient = 1.0;
565  Eval(vparam, aduu, rr);
566  for (int jj = 0; jj < vector_size; jj++)
567  {
568  jac(jj, ii) = rr[jj].gradient;
569  }
570  aduu[ii].gradient = 0.0;
571  }
572  }
573  }
574 
575 private:
576  /// Evaluates the residual from TFunctor(...).
577  /// Intended for internal use only.
578  void Eval(const Vector &vparam, ADFVector &uu, ADFVector &rr)
579  {
580  tf(vparam, uu, rr);
581  }
582 
583  TFunctor<double, const Vector, Vector,
584  vector_size, state_size, param_size> func;
585 
586  TFunctor<ADFType, const Vector, ADFVector,
587  vector_size, state_size, param_size> tf;
588 
589 };
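A usage sketch for the MyVectorFunction functor from the documentation example above (the parameter and state values are illustrative):

void ExampleMyVectorFunction()
{
   // f = {sin(a*x*y), cos(b*x*y*z), x*x + y*x}: three residuals, three states,
   // two parameters a and b.
   mfem::QVectorFuncAutoDiff<MyVectorFunction,3,3,2> qdf;

   mfem::Vector param(2); param[0] = 1.0; param[1] = 2.0;   // a, b
   mfem::Vector state(3); state[0] = 0.5; state[1] = 1.0; state[2] = 1.5;

   mfem::Vector res(3);
   mfem::DenseMatrix jac;
   qdf.VectorFunc(param, state, res);  // evaluates f with plain doubles
   qdf.Jacobian(param, state, jac);    // 3x3 matrix df_i/dx_j via dual numbers
}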
590 
591 /// The class provides an evaluation of the first derivatives and the Hessian of
592 /// a templated scalar function provided as a functor TFunctor. Both the first
593 /// and the second derivatives are evaluated with the help of automatic
594 /// differentiation (AD). The template parameters specify the size of the input
595 /// vector (state_size) and the size of the parameters supplied to the
596 /// function. The TFunctor functor is a template class with parameters [Float
597 /// data type], [Vector type for the additional parameters], [Vector type for
598 /// the state vector and the return residual]. The integer template parameters
599 /// are the same ones passed to QFunctionAutoDiff. The class duplicates Grad and
600 /// Hessian, i.e., VectorFunc calls Grad, and Jacobian calls Hessian. The main
601 /// reason is to provide the same interface as the QVectorFuncAutoDiff class
602 /// used to differentiate vector functions. Such compatibility allows users to
603 /// start the implementation of their problem based only on an energy or a weak
604 /// form. The gradients of the function, computed with Grad/VectorFunc,
605 /// contribute to the FE residual. The Hessian, computed with Hessian/Jacobian,
606 /// contributes to the tangent matrix in Newton's iterations. Once the
607 /// implementation is complete and tested, the users can start improving the
608 /// performance by replacing Grad/VectorFunc with a hand-coded version. The
609 /// gradient is a vector function and can be differentiated with the
610 /// functionality implemented in QVectorFuncAutoDiff. Thus, the user can
611 /// directly employ AD for computing the contributions to the global tangent
612 /// matrix. The main code will not require changes as the names Grad/VectorFunc
613 /// and Hessian/Jacobian are mirrored.
614 template<template<typename, typename, typename, int, int> class TFunctor
615  , int state_size=1, int param_size=0>
616 class QFunctionAutoDiff
617 {
618 private:
619  /// MFEM native AD-type for first derivatives
620  typedef internal::dual<double, double> ADFType;
621  /// Vector type for AD-numbers (first derivatives)
622  typedef TAutoDiffVector<ADFType> ADFVector;
623  /// Matrix type for AD-numbers (first derivatives)
624  typedef TAutoDiffDenseMatrix<ADFType> ADFDenseMatrix;
625  /// MFEM native AD-type for second derivatives
626  typedef internal::dual<ADFType, ADFType> ADSType;
627  /// Vector type for AD-numbers (second derivatives)
628  typedef TAutoDiffVector<ADSType> ADSVector;
629  /// Matrix type for AD-numbers (second derivatives)
630  typedef TAutoDiffDenseMatrix<ADSType> ADSDenseMatrix;
631 
632 public:
633  /// Evaluates a function for arguments vparam and uu. The evaluation is
634  /// based on the operator() in the user provided functor TFunctor.
635  double Eval(const Vector &vparam, Vector &uu)
636  {
637  return tf(vparam,uu);
638  }
639 
640  /// Provides the same functionality as Grad.
641  void VectorFunc(const Vector &vparam, Vector &uu, Vector &rr)
642  {
643  Grad(vparam,uu,rr);
644  }
645 
646  /// Returns the first derivative of TFunctor(...) with respect to the active
647  /// arguments provided in vector uu. The length of rr is the same as for uu.
648  void Grad(const Vector &vparam, Vector &uu, Vector &rr)
649  {
650  int n = uu.Size();
651  rr.SetSize(n);
652  ADFVector aduu(uu);
653  ADFType rez;
654  for (int ii = 0; ii < n; ii++)
655  {
656  aduu[ii].gradient = 1.0;
657  rez = ff(vparam, aduu);
658  rr[ii] = rez.gradient;
659  aduu[ii].gradient = 0.0;
660  }
661  }
662 
663  /// Provides the same functionality as Hessian.
664  void Jacobian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
665  {
666  Hessian(vparam,uu,jac);
667  }
668 
669  /// Returns the Hessian of TFunctor(...) in the dense matrix jac. The
670  /// dimensions of jac are state_size x state_size, where state_size is the
671  /// length of vector uu.
672  void Hessian(mfem::Vector &vparam, mfem::Vector &uu, mfem::DenseMatrix &jac)
673  {
674  int n = uu.Size();
675  jac.SetSize(n);
676  jac = 0.0;
677  {
678  ADSVector aduu(n);
679  for (int ii = 0; ii < n; ii++)
680  {
681  aduu[ii].value = ADFType{uu[ii], 0.0};
682  aduu[ii].gradient = ADFType{0.0, 0.0};
683  }
684 
685  for (int ii = 0; ii < n; ii++)
686  {
687  aduu[ii].value = ADFType{uu[ii], 1.0};
688  for (int jj = 0; jj < (ii + 1); jj++)
689  {
690  aduu[jj].gradient = ADFType{1.0, 0.0};
691  ADSType rez = sf(vparam, aduu);
692  jac(ii, jj) = rez.gradient.gradient;
693  jac(jj, ii) = rez.gradient.gradient;
694  aduu[jj].gradient = ADFType{0.0, 0.0};
695  }
696  aduu[ii].value = ADFType{uu[ii], 0.0};
697  }
698  }
699  }
700 
701 private:
702  TFunctor<double, const Vector, Vector, state_size, param_size> tf;
703  TFunctor<ADFType, const Vector, ADFVector, state_size, param_size> ff;
704  TFunctor<ADSType, const Vector, ADSVector, state_size, param_size> sf;
705 
706 };
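A sketch of the Grad/VectorFunc and Hessian/Jacobian mirroring described above, reusing the hypothetical ExampleEnergyFunctor from the earlier sketch; the mirrored names let an AD-based prototype be swapped later for hand-coded residual and tangent routines without changing the calling code:

void ExampleMirroredInterface()
{
   mfem::QFunctionAutoDiff<ExampleEnergyFunctor,2,1> qf;
   mfem::Vector param(1); param[0] = 4.0;
   mfem::Vector state(2); state[0] = 1.0; state[1] = 2.0;

   mfem::Vector r1, r2;
   qf.Grad(param, state, r1);        // gradient of the energy
   qf.VectorFunc(param, state, r2);  // identical result via the mirrored name

   mfem::DenseMatrix h1, h2;
   qf.Hessian(param, state, h1);     // Hessian of the energy
   qf.Jacobian(param, state, h2);    // identical result via the mirrored name
}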
707 
708 } // end namespace mfem
709 
710 #endif // NATIVE
711 
712 #endif // ADMFEM_HPP