MFEM  v4.5.1
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
vector.cpp
Go to the documentation of this file.
1 // Copyright (c) 2010-2022, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 // Implementation of data type vector
13 
14 #include "kernels.hpp"
15 #include "vector.hpp"
16 #include "../general/forall.hpp"
17 
18 #if defined(MFEM_USE_SUNDIALS)
19 #include "sundials.hpp"
20 #if defined(MFEM_USE_MPI)
21 #include <nvector/nvector_parallel.h>
22 #endif
23 #endif
24 
25 #ifdef MFEM_USE_OPENMP
26 #include <omp.h>
27 #endif
28 
29 #include <iostream>
30 #include <iomanip>
31 #include <cmath>
32 #include <cstdlib>
33 #include <ctime>
34 #include <limits>
35 
36 namespace mfem
37 {
38 
// NOTE(review): the signature line of this definition is missing from this
// view; from the body this is presumably the copy constructor
// Vector::Vector(const Vector &v) — confirm against vector.hpp.
{
   const int s = v.Size();
   size = s;
   if (s > 0)
   {
      MFEM_ASSERT(!v.data.Empty(), "invalid source vector");
      // Allocate in the same memory type as the source, then deep-copy.
      data.New(s, v.data.GetMemoryType());
      data.CopyFrom(v.data, s);
   }
   // Inherit the source's device-use flag.
   UseDevice(v.UseDevice());
}

// NOTE(review): signature line missing; presumably the move constructor,
// which simply forwards to the move-assignment operator.
{
   *this = std::move(v);
}
56 
57 void Vector::Load(std::istream **in, int np, int *dim)
58 {
59  int i, j, s;
60 
61  s = 0;
62  for (i = 0; i < np; i++)
63  {
64  s += dim[i];
65  }
66 
67  SetSize(s);
68 
69  int p = 0;
70  for (i = 0; i < np; i++)
71  {
72  for (j = 0; j < dim[i]; j++)
73  {
74  *in[i] >> data[p++];
75  // Clang's libc++ sets the failbit when (correctly) parsing subnormals,
76  // so we reset the failbit here.
77  if (!*in[i] && errno == ERANGE)
78  {
79  in[i]->clear();
80  }
81  }
82  }
83 }
84 
85 void Vector::Load(std::istream &in, int Size)
86 {
87  SetSize(Size);
88 
89  for (int i = 0; i < size; i++)
90  {
91  in >> data[i];
92  // Clang's libc++ sets the failbit when (correctly) parsing subnormals,
93  // so we reset the failbit here.
94  if (!in && errno == ERANGE)
95  {
96  in.clear();
97  }
98  }
99 }
100 
101 double &Vector::Elem(int i)
102 {
103  return operator()(i);
104 }
105 
106 const double &Vector::Elem(int i) const
107 {
108  return operator()(i);
109 }
110 
// Dot product with a raw host array; the caller must provide at least
// 'size' entries. Host-only loop (optionally parallelized with legacy OpenMP).
double Vector::operator*(const double *v) const
{
   double dot = 0.0;
#ifdef MFEM_USE_LEGACY_OPENMP
   #pragma omp parallel for reduction(+:dot)
#endif
   for (int i = 0; i < size; i++)
   {
      dot += data[i] * v[i];
   }
   return dot;
}
123 
// Copy 'size' entries from the host pointer 'v' into this vector.
// The size is not changed; 'v' must hold at least 'size' doubles.
Vector &Vector::operator=(const double *v)
{
   data.CopyFromHost(v, size);
   return *this;
}
129 
// NOTE(review): signature line missing; presumably the copy-assignment
// operator Vector::operator=(const Vector &v) — confirm against vector.hpp.
{
#if 0
   SetSize(v.Size(), v.data.GetMemoryType());
   data.CopyFrom(v.data, v.Size());
   UseDevice(v.UseDevice());
#else
   SetSize(v.Size());
   // Temporarily force the source onto the device if either side uses it,
   // then restore its original flag after the copy.
   bool vuse = v.UseDevice();
   const bool use_dev = UseDevice() || vuse;
   v.UseDevice(use_dev);
   // keep 'data' where it is, unless 'use_dev' is true
   if (use_dev) { Write(); }
   data.CopyFrom(v.data, v.Size());
   v.UseDevice(vuse);
#endif
   return *this;
}

// NOTE(review): signature line missing; presumably the move-assignment
// operator. Steals the source's Memory and leaves the source empty.
{
   data = std::move(v.data);
   size = v.size;
   v.data.Reset();
   v.size = 0;
   return *this;
}
157 
// Set all entries to the given constant (on device when enabled).
Vector &Vector::operator=(double value)
{
   const bool use_dev = UseDevice();
   const int N = size;
   auto y = Write(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] = value;);
   return *this;
}

// NOTE(review): signature line missing; from the body (y[i] *= c) this is
// presumably Vector &Vector::operator*=(double c).
{
   const bool use_dev = UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] *= c;);
   return *this;
}

// NOTE(review): signature line missing; presumably the entrywise product
// Vector &Vector::operator*=(const Vector &v).
{
   MFEM_ASSERT(size == v.size, "incompatible Vectors!");

   const bool use_dev = UseDevice() || v.UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   auto x = v.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] *= x[i];);
   return *this;
}

// NOTE(review): signature line missing; presumably operator/=(double c).
// Multiplies by the reciprocal once instead of dividing in the kernel.
{
   const bool use_dev = UseDevice();
   const int N = size;
   const double m = 1.0/c;
   auto y = ReadWrite(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] *= m;);
   return *this;
}

// NOTE(review): signature line missing; presumably the entrywise division
// operator/=(const Vector &v).
{
   MFEM_ASSERT(size == v.size, "incompatible Vectors!");

   const bool use_dev = UseDevice() || v.UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   auto x = v.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] /= x[i];);
   return *this;
}

// NOTE(review): signature line missing; presumably operator-=(double c).
{
   const bool use_dev = UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] -= c;);
   return *this;
}

// NOTE(review): signature line missing; presumably operator-=(const Vector &v).
{
   MFEM_ASSERT(size == v.size, "incompatible Vectors!");

   const bool use_dev = UseDevice() || v.UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   auto x = v.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] -= x[i];);
   return *this;
}

// NOTE(review): signature line missing; presumably operator+=(double c).
{
   const bool use_dev = UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] += c;);
   return *this;
}

// NOTE(review): signature line missing; presumably operator+=(const Vector &v).
{
   MFEM_ASSERT(size == v.size, "incompatible Vectors!");

   const bool use_dev = UseDevice() || v.UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   auto x = v.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] += x[i];);
   return *this;
}
251 
// *this += a * Va (axpy). The kernel is skipped entirely when a == 0.
Vector &Vector::Add(const double a, const Vector &Va)
{
   MFEM_ASSERT(size == Va.size, "incompatible Vectors!");

   if (a != 0.0)
   {
      const int N = size;
      const bool use_dev = UseDevice() || Va.UseDevice();
      auto y = ReadWrite(use_dev);
      auto x = Va.Read(use_dev);
      MFEM_FORALL_SWITCH(use_dev, i, N, y[i] += a * x[i];);
   }
   return *this;
}

// *this = a * Va (scaled copy).
Vector &Vector::Set(const double a, const Vector &Va)
{
   MFEM_ASSERT(size == Va.size, "incompatible Vectors!");

   const bool use_dev = UseDevice() || Va.UseDevice();
   const int N = size;
   auto x = Va.Read(use_dev);
   auto y = Write(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] = a * x[i];);
   return *this;
}
278 
279 void Vector::SetVector(const Vector &v, int offset)
280 {
281  MFEM_ASSERT(v.Size() + offset <= size, "invalid sub-vector");
282 
283  const int vs = v.Size();
284  const double *vp = v.data;
285  double *p = data + offset;
286  for (int i = 0; i < vs; i++)
287  {
288  p[i] = vp[i];
289  }
290 }
291 
292 void Vector::AddSubVector(const Vector &v, int offset)
293 {
294  MFEM_ASSERT(v.Size() + offset <= size, "invalid sub-vector");
295 
296  const int vs = v.Size();
297  const double *vp = v.data;
298  double *p = data + offset;
299  for (int i = 0; i < vs; i++)
300  {
301  p[i] += vp[i];
302  }
303 }
304 
// NOTE(review): signature line missing; from the body this is presumably
// void Vector::Neg() — negate all entries in place.
{
   const bool use_dev = UseDevice();
   const int N = size;
   auto y = ReadWrite(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] = -y[i];);
}
312 
// v = v1 + v2, entrywise. All three vectors must have the same size;
// 'v' may alias 'v1' or 'v2'.
void add(const Vector &v1, const Vector &v2, Vector &v)
{
   MFEM_ASSERT(v.size == v1.size && v.size == v2.size,
               "incompatible Vectors!");

#if !defined(MFEM_USE_LEGACY_OPENMP)
   const bool use_dev = v1.UseDevice() || v2.UseDevice() || v.UseDevice();
   const int N = v.size;
   // Note: get read access first, in case v is the same as v1/v2.
   auto x1 = v1.Read(use_dev);
   auto x2 = v2.Read(use_dev);
   auto y = v.Write(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, y[i] = x1[i] + x2[i];);
#else
   #pragma omp parallel for
   for (int i = 0; i < v.size; i++)
   {
      v.data[i] = v1.data[i] + v2.data[i];
   }
#endif
}

// v = v1 + alpha * v2. Special-cases alpha == 0 (plain copy) and
// alpha == 1 (plain add) to avoid the scaled kernel.
void add(const Vector &v1, double alpha, const Vector &v2, Vector &v)
{
   MFEM_ASSERT(v.size == v1.size && v.size == v2.size,
               "incompatible Vectors!");

   if (alpha == 0.0)
   {
      v = v1;
   }
   else if (alpha == 1.0)
   {
      add(v1, v2, v);
   }
   else
   {
#if !defined(MFEM_USE_LEGACY_OPENMP)
      const bool use_dev = v1.UseDevice() || v2.UseDevice() || v.UseDevice();
      const int N = v.size;
      // Note: get read access first, in case v is the same as v1/v2.
      auto d_x = v1.Read(use_dev);
      auto d_y = v2.Read(use_dev);
      auto d_z = v.Write(use_dev);
      MFEM_FORALL_SWITCH(use_dev, i, N, d_z[i] = d_x[i] + alpha * d_y[i];);
#else
      const double *v1p = v1.data, *v2p = v2.data;
      double *vp = v.data;
      const int s = v.size;
      #pragma omp parallel for
      for (int i = 0; i < s; i++)
      {
         vp[i] = v1p[i] + alpha*v2p[i];
      }
#endif
   }
}

// z = a * (x + y). Special-cases a == 0 (zero fill) and a == 1 (plain add).
void add(const double a, const Vector &x, const Vector &y, Vector &z)
{
   MFEM_ASSERT(x.size == y.size && x.size == z.size,
               "incompatible Vectors!");

   if (a == 0.0)
   {
      z = 0.0;
   }
   else if (a == 1.0)
   {
      add(x, y, z);
   }
   else
   {
#if !defined(MFEM_USE_LEGACY_OPENMP)
      const bool use_dev = x.UseDevice() || y.UseDevice() || z.UseDevice();
      const int N = x.size;
      // Note: get read access first, in case z is the same as x/y.
      auto xd = x.Read(use_dev);
      auto yd = y.Read(use_dev);
      auto zd = z.Write(use_dev);
      MFEM_FORALL_SWITCH(use_dev, i, N, zd[i] = a * (xd[i] + yd[i]););
#else
      const double *xp = x.data;
      const double *yp = y.data;
      double *zp = z.data;
      const int s = x.size;
      #pragma omp parallel for
      for (int i = 0; i < s; i++)
      {
         zp[i] = a * (xp[i] + yp[i]);
      }
#endif
   }
}

// z = a * x + b * y. Special-cases a == 0 and b == 0; the other algebraic
// shortcuts are disabled (kept in the '#if 0' block).
void add(const double a, const Vector &x,
         const double b, const Vector &y, Vector &z)
{
   MFEM_ASSERT(x.size == y.size && x.size == z.size,
               "incompatible Vectors!");

   if (a == 0.0)
   {
      z.Set(b, y);
   }
   else if (b == 0.0)
   {
      z.Set(a, x);
   }
#if 0
   else if (a == 1.0)
   {
      add(x, b, y, z);
   }
   else if (b == 1.0)
   {
      add(y, a, x, z);
   }
   else if (a == b)
   {
      add(a, x, y, z);
   }
#endif
   else
   {
#if !defined(MFEM_USE_LEGACY_OPENMP)
      const bool use_dev = x.UseDevice() || y.UseDevice() || z.UseDevice();
      const int N = x.size;
      // Note: get read access first, in case z is the same as x/y.
      auto xd = x.Read(use_dev);
      auto yd = y.Read(use_dev);
      auto zd = z.Write(use_dev);
      MFEM_FORALL_SWITCH(use_dev, i, N, zd[i] = a * xd[i] + b * yd[i];);
#else
      const double *xp = x.data;
      const double *yp = y.data;
      double *zp = z.data;
      const int s = x.size;
      #pragma omp parallel for
      for (int i = 0; i < s; i++)
      {
         zp[i] = a * xp[i] + b * yp[i];
      }
#endif
   }
}

// z = x - y, entrywise; 'z' may alias 'x' or 'y'.
void subtract(const Vector &x, const Vector &y, Vector &z)
{
   MFEM_ASSERT(x.size == y.size && x.size == z.size,
               "incompatible Vectors!");

#if !defined(MFEM_USE_LEGACY_OPENMP)
   const bool use_dev = x.UseDevice() || y.UseDevice() || z.UseDevice();
   const int N = x.size;
   // Note: get read access first, in case z is the same as x/y.
   auto xd = x.Read(use_dev);
   auto yd = y.Read(use_dev);
   auto zd = z.Write(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N, zd[i] = xd[i] - yd[i];);
#else
   const double *xp = x.data;
   const double *yp = y.data;
   double *zp = z.data;
   const int s = x.size;
   #pragma omp parallel for
   for (int i = 0; i < s; i++)
   {
      zp[i] = xp[i] - yp[i];
   }
#endif
}

// z = a * (x - y). Special-cases a == 0 (zero fill) and a == 1.
void subtract(const double a, const Vector &x, const Vector &y, Vector &z)
{
   MFEM_ASSERT(x.size == y.size && x.size == z.size,
               "incompatible Vectors!");

   if (a == 0.)
   {
      z = 0.;
   }
   else if (a == 1.)
   {
      subtract(x, y, z);
   }
   else
   {
#if !defined(MFEM_USE_LEGACY_OPENMP)
      const bool use_dev = x.UseDevice() || y.UseDevice() || z.UseDevice();
      const int N = x.size;
      // Note: get read access first, in case z is the same as x/y.
      auto xd = x.Read(use_dev);
      auto yd = y.Read(use_dev);
      auto zd = z.Write(use_dev);
      MFEM_FORALL_SWITCH(use_dev, i, N, zd[i] = a * (xd[i] - yd[i]););
#else
      const double *xp = x.data;
      const double *yp = y.data;
      double *zp = z.data;
      const int s = x.size;
      #pragma omp parallel for
      for (int i = 0; i < s; i++)
      {
         zp[i] = a * (xp[i] - yp[i]);
      }
#endif
   }
}
522 
// Clamp each entry of *this into the interval [lo[i], hi[i]], i.e. replace
// it by the median of (lo[i], (*this)[i], hi[i]).
void Vector::median(const Vector &lo, const Vector &hi)
{
   MFEM_ASSERT(size == lo.size && size == hi.size,
               "incompatible Vectors!");

   const bool use_dev = UseDevice() || lo.UseDevice() || hi.UseDevice();
   const int N = size;
   // Note: get read access first, in case *this is the same as lo/hi.
   auto l = lo.Read(use_dev);
   auto h = hi.Read(use_dev);
   auto m = Write(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, N,
   {
      if (m[i] < l[i])
      {
         m[i] = l[i];
      }
      else if (m[i] > h[i])
      {
         m[i] = h[i];
      }
   });
}
546 
// Gather the entries listed in 'dofs' into 'elemvect' (resized to match).
// Negative dof encoding: index -1-j means "negated entry at j".
void Vector::GetSubVector(const Array<int> &dofs, Vector &elemvect) const
{
   const int n = dofs.Size();
   elemvect.SetSize(n);
   const bool use_dev = dofs.UseDevice() || elemvect.UseDevice();
   auto d_y = elemvect.Write(use_dev);
   auto d_X = Read(use_dev);
   auto d_dofs = dofs.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, n,
   {
      const int dof_i = d_dofs[i];
      d_y[i] = dof_i >= 0 ? d_X[dof_i] : -d_X[-dof_i-1];
   });
}

// Host-pointer overload of the gather above, with the same sign encoding.
// NOTE(review): one statement appears to be missing from this view (likely a
// host-read synchronization of 'data') — confirm against the repository.
void Vector::GetSubVector(const Array<int> &dofs, double *elem_data) const
{
   const int n = dofs.Size();
   for (int i = 0; i < n; i++)
   {
      const int j = dofs[i];
      elem_data[i] = (j >= 0) ? data[j] : -data[-1-j];
   }
}
572 
// Set the entries listed in 'dofs' to 'value'; a negative dof -1-j sets
// entry j to -value.
void Vector::SetSubVector(const Array<int> &dofs, const double value)
{
   const bool use_dev = dofs.UseDevice();
   const int n = dofs.Size();
   // Use read+write access for *this - we only modify some of its entries
   auto d_X = ReadWrite(use_dev);
   auto d_dofs = dofs.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, n,
   {
      const int j = d_dofs[i];
      if (j >= 0)
      {
         d_X[j] = value;
      }
      else
      {
         d_X[-1-j] = -value;
      }
   });
}

// Scatter 'elemvect' into the entries listed in 'dofs' (same sign encoding).
void Vector::SetSubVector(const Array<int> &dofs, const Vector &elemvect)
{
   MFEM_ASSERT(dofs.Size() == elemvect.Size(),
               "Size mismatch: length of dofs is " << dofs.Size()
               << ", length of elemvect is " << elemvect.Size());

   const bool use_dev = dofs.UseDevice() || elemvect.UseDevice();
   const int n = dofs.Size();
   // Use read+write access for X - we only modify some of its entries
   auto d_X = ReadWrite(use_dev);
   auto d_y = elemvect.Read(use_dev);
   auto d_dofs = dofs.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, n,
   {
      const int dof_i = d_dofs[i];
      if (dof_i >= 0)
      {
         d_X[dof_i] = d_y[i];
      }
      else
      {
         d_X[-1-dof_i] = -d_y[i];
      }
   });
}

// Host-pointer overload of the scatter above.
// NOTE(review): a statement appears to be missing from this view right after
// the comment below (likely a HostReadWrite() call) — confirm upstream.
void Vector::SetSubVector(const Array<int> &dofs, double *elem_data)
{
   // Use read+write access because we overwrite only part of the data.
   const int n = dofs.Size();
   for (int i = 0; i < n; i++)
   {
      const int j= dofs[i];
      if (j >= 0)
      {
         operator()(j) = elem_data[i];
      }
      else
      {
         operator()(-1-j) = -elem_data[i];
      }
   }
}
638 
// Accumulate 'elemvect' into the entries listed in 'dofs'; a negative dof
// -1-j subtracts from entry j instead.
void Vector::AddElementVector(const Array<int> &dofs, const Vector &elemvect)
{
   MFEM_ASSERT(dofs.Size() == elemvect.Size(), "Size mismatch: "
               "length of dofs is " << dofs.Size() <<
               ", length of elemvect is " << elemvect.Size());

   const bool use_dev = dofs.UseDevice() || elemvect.UseDevice();
   const int n = dofs.Size();
   auto d_y = elemvect.Read(use_dev);
   auto d_X = ReadWrite(use_dev);
   auto d_dofs = dofs.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, n,
   {
      const int j = d_dofs[i];
      if (j >= 0)
      {
         d_X[j] += d_y[i];
      }
      else
      {
         d_X[-1-j] -= d_y[i];
      }
   });
}

// Host-pointer overload of the accumulation above.
// NOTE(review): a statement appears to be missing from this view at the top
// of the body (likely a HostReadWrite() call) — confirm upstream.
void Vector::AddElementVector(const Array<int> &dofs, double *elem_data)
{
   const int n = dofs.Size();
   for (int i = 0; i < n; i++)
   {
      const int j = dofs[i];
      if (j >= 0)
      {
         operator()(j) += elem_data[i];
      }
      else
      {
         operator()(-1-j) -= elem_data[i];
      }
   }
}

// Scaled accumulation: *this[dofs] += a * elemvect (same sign encoding).
void Vector::AddElementVector(const Array<int> &dofs, const double a,
                              const Vector &elemvect)
{
   MFEM_ASSERT(dofs.Size() == elemvect.Size(), "Size mismatch: "
               "length of dofs is " << dofs.Size() <<
               ", length of elemvect is " << elemvect.Size());

   const bool use_dev = dofs.UseDevice() || elemvect.UseDevice();
   const int n = dofs.Size();
   auto d_y = ReadWrite(use_dev);
   auto d_x = elemvect.Read(use_dev);
   auto d_dofs = dofs.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, n,
   {
      const int j = d_dofs[i];
      if (j >= 0)
      {
         d_y[j] += a * d_x[i];
      }
      else
      {
         d_y[-1-j] -= a * d_x[i];
      }
   });
}
707 
// Set every entry NOT listed in 'dofs' to 'val': save the listed entries,
// fill the whole vector with 'val', then restore the saved entries.
// NOTE(review): the 'dofs_vals' constructor call below is truncated in this
// view — the missing continuation presumably selects a device or host
// MemoryType based on 'use_dev'; confirm against the repository.
void Vector::SetSubVectorComplement(const Array<int> &dofs, const double val)
{
   const bool use_dev = UseDevice() || dofs.UseDevice();
   const int n = dofs.Size();
   const int N = size;
   Vector dofs_vals(n, use_dev ?
   auto d_data = ReadWrite(use_dev);
   auto d_dofs_vals = dofs_vals.Write(use_dev);
   auto d_dofs = dofs.Read(use_dev);
   MFEM_FORALL_SWITCH(use_dev, i, n, d_dofs_vals[i] = d_data[d_dofs[i]];);
   MFEM_FORALL_SWITCH(use_dev, i, N, d_data[i] = val;);
   MFEM_FORALL_SWITCH(use_dev, i, n, d_data[d_dofs[i]] = d_dofs_vals[i];);
}
723 
// Print the entries to 'os', 'width' values per line, separated by spaces.
// NOTE(review): a statement appears to be missing from this view after the
// early return (likely a host-read synchronization of 'data') — confirm.
void Vector::Print(std::ostream &os, int width) const
{
   if (!size) { return; }
   for (int i = 0; 1; )
   {
      // ZeroSubnormal keeps tiny denormal values from printing as noise.
      os << ZeroSubnormal(data[i]);
      i++;
      if (i == size)
      {
         break;
      }
      if ( i % width == 0 )
      {
         os << '\n';
      }
      else
      {
         os << ' ';
      }
   }
   os << '\n';
}

#ifdef MFEM_USE_ADIOS2
// NOTE(review): the signature line is missing from this view; presumably
// this is Vector::Print(adios2stream &os, const std::string &variable_name),
// which writes the data through the ADIOS2 engine.
                   const std::string& variable_name) const
{
   if (!size) { return; }
   os.engine.Put(variable_name, &data[0] );
}
#endif
757 
// Print in HYPRE's ASCII vector format: the size on the first line, then one
// entry per line in scientific notation with 14-digit precision. The
// original stream flags/precision are restored before returning.
void Vector::Print_HYPRE(std::ostream &os) const
{
   int i;
   std::ios::fmtflags old_fmt = os.flags();
   os.setf(std::ios::scientific);
   std::streamsize old_prec = os.precision(14);

   os << size << '\n'; // number of rows

   // Ensure the data is valid on the host before reading it.
   data.Read(MemoryClass::HOST, size);
   for (i = 0; i < size; i++)
   {
      os << ZeroSubnormal(data[i]) << '\n';
   }

   os.precision(old_prec);
   os.flags(old_fmt);
}

// Print the size and a hash of the entries — useful for quick regression
// comparisons without dumping full data.
void Vector::PrintHash(std::ostream &os) const
{
   os << "size: " << size << '\n';
   HashFunction hf;
   hf.AppendDoubles(HostRead(), size);
   os << "hash: " << hf.GetHash() << '\n';
}
784 
// Fill with pseudo-random values in [0,1) using rand(). When no global seed
// has been set, seeds srand() from 'seed' (or the current time if seed==0).
void Vector::Randomize(int seed)
{
   const double max = (double)(RAND_MAX) + 1.;

   if (!global_seed_set)
   {
      if (seed == 0)
      {
         seed = (int)time(0);
      }

      srand((unsigned)seed);
   }

   // Generate on the host; device copies are invalidated by HostWrite().
   HostWrite();
   for (int i = 0; i < size; i++)
   {
      data[i] = std::abs(rand()/max);
   }
}

// Seed the global RNG once; subsequent Randomize() calls will not re-seed.
void Vector::SetGlobalSeed(int gseed)
{
   srand((unsigned)gseed);
   global_seed_set = true;
}
811 
// Returns the l2 norm, delegating the overflow-safe scaled accumulation to
// kernels::Norml2 for size > 1.
double Vector::Norml2() const
{
   // Scale entries of Vector on the fly, using algorithms from
   // std::hypot() and LAPACK's drm2. This scaling ensures that the
   // argument of each call to std::pow is <= 1 to avoid overflow.
   if (0 == size)
   {
      return 0.0;
   } // end if 0 == size

   if (1 == size)
   {
      return std::abs(data[0]);
   } // end if 1 == size
   return kernels::Norml2(size, (const double*) data);
}
829 
830 double Vector::Normlinf() const
831 {
832  HostRead();
833  double max = 0.0;
834  for (int i = 0; i < size; i++)
835  {
836  max = std::max(std::abs(data[i]), max);
837  }
838  return max;
839 }
840 
841 double Vector::Norml1() const
842 {
843  HostRead();
844  double sum = 0.0;
845  for (int i = 0; i < size; i++)
846  {
847  sum += std::abs(data[i]);
848  }
849  return sum;
850 }
851 
// Returns the lp norm for p > 0; dispatches to the specialized l1/l2/linf
// implementations, otherwise uses an overflow-safe scaled accumulation.
double Vector::Normlp(double p) const
{
   MFEM_ASSERT(p > 0.0, "Vector::Normlp");

   if (p == 1.0)
   {
      return Norml1();
   }
   if (p == 2.0)
   {
      return Norml2();
   }
   if (p < infinity())
   {
      // Scale entries of Vector on the fly, using algorithms from
      // std::hypot() and LAPACK's drm2. This scaling ensures that the
      // argument of each call to std::pow is <= 1 to avoid overflow.
      if (0 == size)
      {
         return 0.0;
      } // end if 0 == size

      if (1 == size)
      {
         return std::abs(data[0]);
      } // end if 1 == size

      // 'scale' tracks the running maximum |entry|; 'sum' accumulates
      // sum |x_i / scale|^p, rescaled whenever a larger entry is found.
      double scale = 0.0;
      double sum = 0.0;

      for (int i = 0; i < size; i++)
      {
         if (data[i] != 0.0)
         {
            const double absdata = std::abs(data[i]);
            if (scale <= absdata)
            {
               sum = 1.0 + sum * std::pow(scale / absdata, p);
               scale = absdata;
               continue;
            } // end if scale <= absdata
            sum += std::pow(absdata / scale, p); // else scale > absdata
         } // end if data[i] != 0
      }
      return scale * std::pow(sum, 1.0/p);
   } // end if p < infinity()

   return Normlinf(); // else p >= infinity()
}
901 
902 double Vector::Max() const
903 {
904  if (size == 0) { return -infinity(); }
905 
906  HostRead();
907  double max = data[0];
908 
909  for (int i = 1; i < size; i++)
910  {
911  if (data[i] > max)
912  {
913  max = data[i];
914  }
915  }
916 
917  return max;
918 }
919 
920 double Vector::Sum() const
921 {
922  double sum = 0.0;
923 
924  const double *h_data = this->HostRead();
925  for (int i = 0; i < size; i++)
926  {
927  sum += h_data[i];
928  }
929 
930  return sum;
931 }
932 
#ifdef MFEM_USE_CUDA
// Block-level tree reduction computing the minimum of 'x'; one partial
// result per block is written to 'gdsr'.
static __global__ void cuKernelMin(const int N, double *gdsr, const double *x)
{
   __shared__ double s_min[MFEM_CUDA_BLOCKS];
   const int n = blockDim.x*blockIdx.x + threadIdx.x;
   if (n>=N) { return; }
   const int bid = blockIdx.x;
   const int tid = threadIdx.x;
   const int bbd = bid*blockDim.x;
   const int rid = bbd+tid;
   s_min[tid] = x[n];
   // Halve the number of active workers each round, combining pairs.
   for (int workers=blockDim.x>>1; workers>0; workers>>=1)
   {
      __syncthreads();
      if (tid >= workers) { continue; }
      if (rid >= N) { continue; }
      const int dualTid = tid + workers;
      if (dualTid >= N) { continue; }
      const int rdd = bbd+dualTid;
      if (rdd >= N) { continue; }
      if (dualTid >= blockDim.x) { continue; }
      s_min[tid] = fmin(s_min[tid], s_min[dualTid]);
   }
   if (tid==0) { gdsr[bid] = s_min[0]; }
}

// Reusable host/device buffer for the per-block partial results.
static Array<double> cuda_reduce_buf;

// Launches cuKernelMin, then finishes the reduction of the per-block
// partials on the host.
// NOTE(review): the declaration/initialization of 'min' is missing from this
// view (presumably 'double min = infinity-like initial value') — confirm.
static double cuVectorMin(const int N, const double *X)
{
   const int tpb = MFEM_CUDA_BLOCKS;
   const int blockSize = MFEM_CUDA_BLOCKS;
   const int gridSize = (N+blockSize-1)/blockSize;
   const int min_sz = (N%tpb)==0? (N/tpb) : (1+N/tpb);
   cuda_reduce_buf.SetSize(min_sz);
   Memory<double> &buf = cuda_reduce_buf.GetMemory();
   double *d_min = buf.Write(MemoryClass::DEVICE, min_sz);
   cuKernelMin<<<gridSize,blockSize>>>(N, d_min, X);
   MFEM_GPU_CHECK(cudaGetLastError());
   const double *h_min = buf.Read(MemoryClass::HOST, min_sz);
   for (int i = 0; i < min_sz; i++) { min = fmin(min, h_min[i]); }
   return min;
}

// Block-level tree reduction computing the dot product of 'x' and 'y';
// one partial sum per block is written to 'gdsr'.
static __global__ void cuKernelDot(const int N, double *gdsr,
                                   const double *x, const double *y)
{
   __shared__ double s_dot[MFEM_CUDA_BLOCKS];
   const int n = blockDim.x*blockIdx.x + threadIdx.x;
   if (n>=N) { return; }
   const int bid = blockIdx.x;
   const int tid = threadIdx.x;
   const int bbd = bid*blockDim.x;
   const int rid = bbd+tid;
   s_dot[tid] = x[n] * y[n];
   for (int workers=blockDim.x>>1; workers>0; workers>>=1)
   {
      __syncthreads();
      if (tid >= workers) { continue; }
      if (rid >= N) { continue; }
      const int dualTid = tid + workers;
      if (dualTid >= N) { continue; }
      const int rdd = bbd+dualTid;
      if (rdd >= N) { continue; }
      if (dualTid >= blockDim.x) { continue; }
      s_dot[tid] += s_dot[dualTid];
   }
   if (tid==0) { gdsr[bid] = s_dot[0]; }
}

// Launches cuKernelDot, then sums the per-block partials on the host.
static double cuVectorDot(const int N, const double *X, const double *Y)
{
   const int tpb = MFEM_CUDA_BLOCKS;
   const int blockSize = MFEM_CUDA_BLOCKS;
   const int gridSize = (N+blockSize-1)/blockSize;
   const int dot_sz = (N%tpb)==0? (N/tpb) : (1+N/tpb);
   cuda_reduce_buf.SetSize(dot_sz, Device::GetDeviceMemoryType());
   Memory<double> &buf = cuda_reduce_buf.GetMemory();
   double *d_dot = buf.Write(MemoryClass::DEVICE, dot_sz);
   cuKernelDot<<<gridSize,blockSize>>>(N, d_dot, X, Y);
   MFEM_GPU_CHECK(cudaGetLastError());
   const double *h_dot = buf.Read(MemoryClass::HOST, dot_sz);
   double dot = 0.0;
   for (int i = 0; i < dot_sz; i++) { dot += h_dot[i]; }
   return dot;
}
#endif // MFEM_USE_CUDA
1021 
#ifdef MFEM_USE_HIP
// HIP counterpart of cuKernelMin: per-block tree reduction of the minimum.
static __global__ void hipKernelMin(const int N, double *gdsr, const double *x)
{
   __shared__ double s_min[MFEM_HIP_BLOCKS];
   const int n = hipBlockDim_x*hipBlockIdx_x + hipThreadIdx_x;
   if (n>=N) { return; }
   const int bid = hipBlockIdx_x;
   const int tid = hipThreadIdx_x;
   const int bbd = bid*hipBlockDim_x;
   const int rid = bbd+tid;
   s_min[tid] = x[n];
   for (int workers=hipBlockDim_x>>1; workers>0; workers>>=1)
   {
      __syncthreads();
      if (tid >= workers) { continue; }
      if (rid >= N) { continue; }
      const int dualTid = tid + workers;
      if (dualTid >= N) { continue; }
      const int rdd = bbd+dualTid;
      if (rdd >= N) { continue; }
      if (dualTid >= hipBlockDim_x) { continue; }
      s_min[tid] = fmin(s_min[tid], s_min[dualTid]);
   }
   if (tid==0) { gdsr[bid] = s_min[0]; }
}

// Reusable buffer for per-block partials. NOTE(review): this reuses the
// name 'cuda_reduce_buf' also declared in the CUDA section above —
// presumably CUDA and HIP builds are mutually exclusive; confirm.
static Array<double> cuda_reduce_buf;

// Launches hipKernelMin, then finishes the reduction on the host.
// NOTE(review): the declaration/initialization of 'min' is missing from
// this view — confirm against the repository.
static double hipVectorMin(const int N, const double *X)
{
   const int tpb = MFEM_HIP_BLOCKS;
   const int blockSize = MFEM_HIP_BLOCKS;
   const int gridSize = (N+blockSize-1)/blockSize;
   const int min_sz = (N%tpb)==0 ? (N/tpb) : (1+N/tpb);
   cuda_reduce_buf.SetSize(min_sz);
   Memory<double> &buf = cuda_reduce_buf.GetMemory();
   double *d_min = buf.Write(MemoryClass::DEVICE, min_sz);
   hipLaunchKernelGGL(hipKernelMin,gridSize,blockSize,0,0,N,d_min,X);
   MFEM_GPU_CHECK(hipGetLastError());
   const double *h_min = buf.Read(MemoryClass::HOST, min_sz);
   for (int i = 0; i < min_sz; i++) { min = fmin(min, h_min[i]); }
   return min;
}

// HIP counterpart of cuKernelDot: per-block tree reduction of the dot product.
static __global__ void hipKernelDot(const int N, double *gdsr,
                                    const double *x, const double *y)
{
   __shared__ double s_dot[MFEM_HIP_BLOCKS];
   const int n = hipBlockDim_x*hipBlockIdx_x + hipThreadIdx_x;
   if (n>=N) { return; }
   const int bid = hipBlockIdx_x;
   const int tid = hipThreadIdx_x;
   const int bbd = bid*hipBlockDim_x;
   const int rid = bbd+tid;
   s_dot[tid] = x[n] * y[n];
   for (int workers=hipBlockDim_x>>1; workers>0; workers>>=1)
   {
      __syncthreads();
      if (tid >= workers) { continue; }
      if (rid >= N) { continue; }
      const int dualTid = tid + workers;
      if (dualTid >= N) { continue; }
      const int rdd = bbd+dualTid;
      if (rdd >= N) { continue; }
      if (dualTid >= hipBlockDim_x) { continue; }
      s_dot[tid] += s_dot[dualTid];
   }
   if (tid==0) { gdsr[bid] = s_dot[0]; }
}

// Launches hipKernelDot, then sums the per-block partials on the host.
static double hipVectorDot(const int N, const double *X, const double *Y)
{
   const int tpb = MFEM_HIP_BLOCKS;
   const int blockSize = MFEM_HIP_BLOCKS;
   const int gridSize = (N+blockSize-1)/blockSize;
   const int dot_sz = (N%tpb)==0 ? (N/tpb) : (1+N/tpb);
   cuda_reduce_buf.SetSize(dot_sz);
   Memory<double> &buf = cuda_reduce_buf.GetMemory();
   double *d_dot = buf.Write(MemoryClass::DEVICE, dot_sz);
   hipLaunchKernelGGL(hipKernelDot,gridSize,blockSize,0,0,N,d_dot,X,Y);
   MFEM_GPU_CHECK(hipGetLastError());
   const double *h_dot = buf.Read(MemoryClass::HOST, dot_sz);
   double dot = 0.0;
   for (int i = 0; i < dot_sz; i++) { dot += h_dot[i]; }
   return dot;
}
#endif // MFEM_USE_HIP
1110 
// Dot product with another Vector, dispatched by available backend
// (OCCA / CUDA / HIP / OpenMP / debug-device / plain CPU).
// NOTE(review): several backend-condition lines (e.g. the
// 'if (Device::Allows(...))' guards and the OCCA call's second argument) are
// missing from this view; the structure below is incomplete — confirm
// against the repository before editing.
double Vector::operator*(const Vector &v) const
{
   MFEM_ASSERT(size == v.size, "incompatible Vectors!");
   if (size == 0) { return 0.0; }

   const bool use_dev = UseDevice() || v.UseDevice();
#if defined(MFEM_USE_CUDA) || defined(MFEM_USE_HIP) || defined(MFEM_USE_OPENMP)
   auto m_data = Read(use_dev);
#else
   Read(use_dev);
#endif
   auto v_data = v.Read(use_dev);

   if (!use_dev) { goto vector_dot_cpu; }

#ifdef MFEM_USE_OCCA
   if (DeviceCanUseOcca())
   {
      return occa::linalg::dot<double,double,double>(
   }
#endif

#ifdef MFEM_USE_CUDA
   {
      return cuVectorDot(size, m_data, v_data);
   }
#endif

#ifdef MFEM_USE_HIP
   {
      return hipVectorDot(size, m_data, v_data);
   }
#endif

#ifdef MFEM_USE_OPENMP
   {
#define MFEM_USE_OPENMP_DETERMINISTIC_DOT
#ifdef MFEM_USE_OPENMP_DETERMINISTIC_DOT
      // By default, use a deterministic way of computing the dot product
      static Vector th_dot;
      #pragma omp parallel
      {
         const int nt = omp_get_num_threads();
         #pragma omp master
         th_dot.SetSize(nt);
         const int tid = omp_get_thread_num();
         const int stride = (size + nt - 1)/nt;
         const int start = tid*stride;
         const int stop = std::min(start + stride, size);
         double my_dot = 0.0;
         for (int i = start; i < stop; i++)
         {
            my_dot += m_data[i] * v_data[i];
         }
         #pragma omp barrier
         th_dot(tid) = my_dot;
      }
      return th_dot.Sum();
#else
      // The standard way of computing the dot product is non-deterministic
      double prod = 0.0;
      #pragma omp parallel for reduction(+:prod)
      for (int i = 0; i < size; i++)
      {
         prod += m_data[i] * v_data[i];
      }
      return prod;
#endif // MFEM_USE_OPENMP_DETERMINISTIC_DOT
   }
#endif // MFEM_USE_OPENMP
   // Debug-device fallback: single-threaded forall accumulation.
   {
      const int N = size;
      auto v_data_ = v.Read();
      auto m_data_ = Read();
      Vector dot(1);
      dot.UseDevice(true);
      auto d_dot = dot.Write();
      dot = 0.0;
      MFEM_FORALL(i, N, d_dot[0] += m_data_[i] * v_data_[i];);
      dot.HostReadWrite();
      return dot[0];
   }
vector_dot_cpu:
   return operator*(v_data);
}
1201 
// Minimum entry, dispatched by available backend; returns +infinity() for an
// empty vector.
// NOTE(review): the backend-condition lines ('if (Device::Allows(...))'
// guards) are missing from this view; the structure below is incomplete —
// confirm against the repository before editing.
double Vector::Min() const
{
   if (size == 0) { return infinity(); }

   const bool use_dev = UseDevice();
   auto m_data = Read(use_dev);

   if (!use_dev) { goto vector_min_cpu; }

#ifdef MFEM_USE_OCCA
   if (DeviceCanUseOcca())
   {
      return occa::linalg::min<double,double>(OccaMemoryRead(data, size));
   }
#endif

#ifdef MFEM_USE_CUDA
   {
      return cuVectorMin(size, m_data);
   }
#endif

#ifdef MFEM_USE_HIP
   {
      return hipVectorMin(size, m_data);
   }
#endif

#ifdef MFEM_USE_OPENMP
   {
      double minimum = m_data[0];
      #pragma omp parallel for reduction(min:minimum)
      for (int i = 0; i < size; i++)
      {
         minimum = std::min(minimum, m_data[i]);
      }
      return minimum;
   }
#endif

   // Debug-device fallback: single-threaded forall reduction.
   {
      const int N = size;
      auto m_data_ = Read();
      Vector min(1);
      min = infinity();
      min.UseDevice(true);
      auto d_min = min.ReadWrite();
      MFEM_FORALL(i, N, d_min[0] = (d_min[0]<m_data_[i])?d_min[0]:m_data_[i];);
      min.HostReadWrite();
      return min[0];
   }

vector_min_cpu:
   // Plain host loop.
   double minimum = data[0];
   for (int i = 1; i < size; i++)
   {
      if (m_data[i] < minimum)
      {
         minimum = m_data[i];
      }
   }
   return minimum;
}
1269 
1270 }
Hash function for data sequences.
Definition: hash.hpp:455
int Size() const
Return the logical size of the array.
Definition: array.hpp:138
void SetSubVector(const Array< int > &dofs, const double value)
Set the entries listed in dofs to the given value.
Definition: vector.cpp:573
void SetVector(const Vector &v, int offset)
Definition: vector.cpp:279
Memory< T > & GetMemory()
Return a reference to the Memory object used by the Array.
Definition: array.hpp:117
Memory< double > data
Definition: vector.hpp:64
double & Elem(int i)
Access Vector entries. Index i = 0 .. size-1.
Definition: vector.cpp:101
Device memory; using CUDA or HIP *Malloc and *Free.
void SetSize(int s)
Resize the vector to size s.
Definition: vector.hpp:513
double Norml2() const
Returns the l2 norm of the vector.
Definition: vector.cpp:812
double & operator()(int i)
Access Vector entries using () for 0-based indexing.
Definition: vector.hpp:599
void SetGlobalSeed(int gseed)
Set global seed for random values in sequential calls to Randomize().
Definition: vector.cpp:806
Bitwise-OR of all HIP backends.
Definition: device.hpp:90
void GetSubVector(const Array< int > &dofs, Vector &elemvect) const
Extract entries listed in dofs to the output Vector elemvect.
Definition: vector.cpp:547
virtual double * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:461
int Size() const
Returns the size of the vector.
Definition: vector.hpp:200
T * Write(MemoryClass mc, int size)
Get write-only access to the memory with the given MemoryClass.
bool UseDevice() const
Return the device flag of the Memory object used by the Array.
Definition: array.hpp:123
std::string GetHash() const
Return the hash string for the current sequence and reset (clear) the sequence.
Definition: hash.cpp:60
double Normlinf() const
Returns the l_infinity norm of the vector.
Definition: vector.cpp:830
void CopyFrom(const Memory &src, int size)
Copy size entries from src to *this.
void Randomize(int seed=0)
Set random values in the vector.
Definition: vector.cpp:785
virtual void UseDevice(bool use_dev) const
Enable execution of Vector operations using the mfem::Device.
Definition: vector.hpp:118
void add(const Vector &v1, const Vector &v2, Vector &v)
Definition: vector.cpp:313
HashFunction & AppendDoubles(const double *doubles, size_t num_doubles)
Add a sequence of doubles for hashing, given as a c-array.
Definition: hash.hpp:508
double operator*(const double *) const
Dot product with a double * array.
Definition: vector.cpp:111
MemoryType GetMemoryType() const
Return a MemoryType that is currently valid. If both the host and the device pointers are currently v...
T ZeroSubnormal(T val)
Definition: vector.hpp:477
void AddSubVector(const Vector &v, int offset)
Definition: vector.cpp:292
Vector & operator=(const double *v)
Copy Size() entries from v.
Definition: vector.cpp:124
void Load(std::istream **in, int np, int *dim)
Reads a vector from multiple files.
Definition: vector.cpp:57
double Normlp(double p) const
Returns the l_p norm of the vector.
Definition: vector.cpp:852
void CopyFromHost(const T *src, int size)
Copy size entries from the host pointer src to *this.
bool DeviceCanUseOcca()
Function that determines if an OCCA kernel should be used, based on the current mfem::Device configur...
Definition: occa.hpp:69
virtual double * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:457
double b
Definition: lissajous.cpp:42
static MemoryType GetDeviceMemoryType()
Get the current Device MemoryType. This is the MemoryType used by most MFEM classes when allocating m...
Definition: device.hpp:273
const occa::memory OccaMemoryRead(const Memory< T > &mem, size_t size)
Wrap a Memory object as occa::memory for read only access with the mfem::Device MemoryClass. The returned occa::memory is associated with the default occa::device used by MFEM.
Definition: occa.hpp:37
void median(const Vector &lo, const Vector &hi)
v = median(v,lo,hi) entrywise. Implementation assumes lo <= hi.
Definition: vector.cpp:523
Bitwise-OR of all OpenMP backends.
Definition: device.hpp:92
const T * Read(bool on_dev=true) const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).
Definition: array.hpp:304
MFEM_HOST_DEVICE double Norml2(const int size, const T *data)
Returns the l2 norm of the Vector with given size and data.
Definition: kernels.hpp:133
void AddElementVector(const Array< int > &dofs, const Vector &elemvect)
Add elements of the elemvect Vector to the entries listed in dofs. Negative dof values cause the -dof...
Definition: vector.cpp:639
virtual const double * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:453
Vector & operator/=(double c)
Definition: vector.cpp:188
Bitwise-OR of all CUDA backends.
Definition: device.hpp:88
void SetSubVectorComplement(const Array< int > &dofs, const double val)
Set all vector entries NOT in the dofs Array to the given val.
Definition: vector.cpp:708
Vector & operator*=(double c)
Definition: vector.cpp:167
double Min() const
Returns the minimal element of the vector.
Definition: vector.cpp:1202
Vector & operator+=(double c)
Definition: vector.cpp:231
double Norml1() const
Returns the l_1 norm of the vector.
Definition: vector.cpp:841
static MemoryType GetHostMemoryType()
Get the current Host MemoryType. This is the MemoryType used by most MFEM classes when allocating mem...
Definition: device.hpp:264
void Print(std::ostream &out=mfem::out, int width=8) const
Prints vector to stream out.
Definition: vector.cpp:724
void subtract(const Vector &x, const Vector &y, Vector &z)
Definition: vector.cpp:460
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Definition: array.hpp:679
void Print_HYPRE(std::ostream &out) const
Prints vector to stream out in HYPRE_Vector format.
Definition: vector.cpp:758
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition: device.hpp:258
Vector & Set(const double a, const Vector &x)
(*this) = a * x
Definition: vector.cpp:267
double a
Definition: lissajous.cpp:41
virtual double * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:465
Vector & Add(const double a, const Vector &Va)
(*this) += a * Va
Definition: vector.cpp:252
Host memory; using new[] and delete[].
double Max() const
Returns the maximal element of the vector.
Definition: vector.cpp:902
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::Ge...
bool Empty() const
Return true if the Memory object is empty, see Reset().
T * ReadWrite(MemoryClass mc, int size)
Get read-write access to the memory with the given MemoryClass.
int dim
Definition: ex24.cpp:53
Vector & operator-=(double c)
Definition: vector.cpp:210
bool global_seed_set
Definition: vector.hpp:66
double infinity()
Define a shortcut for std::numeric_limits&lt;double&gt;::infinity()
Definition: vector.hpp:46
const double alpha
Definition: ex15.cpp:369
Vector data type.
Definition: vector.hpp:60
virtual bool UseDevice() const
Return the device flag of the Memory object used by the Vector.
Definition: vector.hpp:121
void PrintHash(std::ostream &out) const
Print the Vector size and hash of its data.
Definition: vector.cpp:777
RefCoord s[3]
const T * Read(MemoryClass mc, int size) const
Get read-only access to the memory with the given MemoryClass.
double Sum() const
Return the sum of the vector entries.
Definition: vector.cpp:920
virtual const double * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:449
virtual double * HostReadWrite()
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:469
[device] Debug backend: host memory is READ/WRITE protected while a device is in use. It allows to test the "device" code-path (using separate host/device memory pools and host <-> device transfers) without any GPU hardware. As 'DEBUG' is sometimes used as a macro, _DEVICE has been added to avoid conflicts.
Definition: device.hpp:75
void Neg()
(*this) = -(*this)
Definition: vector.cpp:305