Sacado_Fad_Exp_Atomic.hpp
// @HEADER
// ***********************************************************************
//
// Sacado Package
// Copyright (2006) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
// USA
// Questions? Contact David M. Gay (dmgay@sandia.gov) or Eric T. Phipps
// (etphipp@sandia.gov).
//
// ***********************************************************************
// @HEADER

#ifndef SACADO_FAD_EXP_ATOMIC_HPP
#define SACADO_FAD_EXP_ATOMIC_HPP

#include "Sacado_ConfigDefs.h"
#if defined(HAVE_SACADO_KOKKOSCORE)

// Provides the ViewFadPtr, GeneralFad, and Expr types used below.
#include "Sacado_Fad_Exp_ViewFad.hpp"
#include "Kokkos_Atomic.hpp"
#include "impl/Kokkos_Error.hpp"

namespace Sacado {

  namespace Fad {
  namespace Exp {

    // Overload of Kokkos::atomic_add for ViewFad types.
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION
    void atomic_add(ViewFadPtr<ValT,sl,ss,U> dst, const Expr<T>& xx) {
      using Kokkos::atomic_add;

      const typename Expr<T>::derived_type& x = xx.derived();

      const int xsz = x.size();
      const int sz = dst->size();

      // We currently cannot handle resizing since that would need to be
      // done atomically.
      if (xsz > sz)
        Kokkos::abort(
          "Sacado error: Fad resize within atomic_add() not supported!");

      if (xsz != sz && sz > 0 && xsz > 0)
        Kokkos::abort(
          "Sacado error: Fad assignment of incompatible sizes!");

      if (sz > 0 && xsz > 0) {
        // Loop over the derivative components
        SACADO_FAD_DERIV_LOOP(i, sz)
          atomic_add(&(dst->fastAccessDx(i)), x.fastAccessDx(i));
      }
      SACADO_FAD_THREAD_SINGLE
        atomic_add(&(dst->val()), x.val());
    }
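
    // Added usage sketch (illustrative, not part of the original source): a
    // scatter-add of Fad values into a shared view.  The names and extents
    // below (sums, src, N, M, p) are hypothetical.  For a Kokkos::View of a
    // Sacado Fad type, &sums(j) yields a ViewFadPtr, so the overload above
    // updates the value and every derivative component without races:
    //
    //   using FadType = Sacado::Fad::Exp::DFad<double>;
    //   Kokkos::View<FadType*> sums("sums", N, p+1);  // p = derivative dimension
    //   Kokkos::View<FadType*> src("src", M, p+1);
    //   Kokkos::parallel_for(M, KOKKOS_LAMBDA(const int i) {
    //     Sacado::Fad::Exp::atomic_add(&sums(i % N), src(i));
    //   });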

    namespace Impl {
      // Our implementation of Kokkos::atomic_oper_fetch() and
      // Kokkos::atomic_fetch_oper() for Sacado types on host
      template <typename Oper, typename DestPtrT, typename ValT, typename T>
      typename Sacado::BaseExprType< Expr<T> >::type
      atomic_oper_fetch_host(const Oper& op, DestPtrT dest, ValT* dest_val,
                             const Expr<T>& x)
      {
        typedef typename Sacado::BaseExprType< Expr<T> >::type return_type;
        const typename Expr<T>::derived_type& val = x.derived();

#ifdef KOKKOS_INTERNAL_NOT_PARALLEL
        auto scope = desul::MemoryScopeCaller();
#else
        auto scope = desul::MemoryScopeDevice();
#endif

        while (!desul::Impl::lock_address((void*)dest_val, scope))
          ;
        desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
        return_type return_val = op.apply(*dest, val);
        *dest = return_val;
        desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
        desul::Impl::unlock_address((void*)dest_val, scope);
        return return_val;
      }

102 template <typename Oper, typename DestPtrT, typename ValT, typename T>
103 typename Sacado::BaseExprType< Expr<T> >::type
104 atomic_fetch_oper_host(const Oper& op, DestPtrT dest, ValT* dest_val,
105 const Expr<T>& x)
106 {
107 typedef typename Sacado::BaseExprType< Expr<T> >::type return_type;
108 const typename Expr<T>::derived_type& val = x.derived();
109
110#ifdef KOKKOS_INTERNAL_NOT_PARALLEL
111 auto scope = desul::MemoryScopeCaller();
112#else
113 auto scope = desul::MemoryScopeDevice();
114#endif
115
116 while (!desul::Impl::lock_address((void*)dest_val, scope))
117 ;
118 desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
119 return_type return_val = *dest;
120 *dest = op.apply(return_val, val);
121 desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
122 desul::Impl::unlock_address((void*)dest_val, scope);
123 return return_val;
124 }
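
      // Added clarification (not part of the original source): the two
      // helpers above differ only in their return value.
      // atomic_oper_fetch_host() returns the *updated* Fad, while
      // atomic_fetch_oper_host() returns the Fad stored *before* the update.
      // For a hypothetical Fad d initialized to 1.0 and x == 2.0 (AddOper is
      // defined further below in this namespace):
      //
      //   auto nv = atomic_oper_fetch_host(AddOper(), &d, &d.val(), x);
      //   // nv == 3.0, d == 3.0
      //   auto ov = atomic_fetch_oper_host(AddOper(), &d, &d.val(), x);
      //   // ov == 3.0, d == 5.0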

      // Helper function to decide if we are using team-based parallelism
#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
      __device__
      inline bool atomics_use_team() {
#if defined(SACADO_VIEW_CUDA_HIERARCHICAL) || defined(SACADO_VIEW_CUDA_HIERARCHICAL_DFAD)
        // It is not allowed to define SACADO_VIEW_CUDA_HIERARCHICAL or
        // SACADO_VIEW_CUDA_HIERARCHICAL_DFAD and use Sacado inside a team-based
        // kernel without Sacado hierarchical parallelism.  So use the
        // team-based version only if blockDim.x > 1 (i.e., a team policy)
        return (blockDim.x > 1);
#else
        return false;
#endif
      }
#endif
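
      // Added note: when atomics_use_team() returns true, the device
      // implementations below acquire the per-address lock on thread
      // threadIdx.x == 0 only and broadcast the result to the rest of the
      // team with a shuffle, so all threads enter and leave the critical
      // section together.  Otherwise each thread negotiates the lock for
      // itself via the __ballot-based retry loop.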

#if defined(KOKKOS_ENABLE_CUDA)

      // Our implementation of Kokkos::atomic_oper_fetch() and
      // Kokkos::atomic_fetch_oper() for Sacado types on device
      template <typename Oper, typename DestPtrT, typename ValT, typename T>
      __device__
      typename Sacado::BaseExprType< Expr<T> >::type
      atomic_oper_fetch_device(const Oper& op, DestPtrT dest, ValT* dest_val,
                               const Expr<T>& x)
      {
        typedef typename Sacado::BaseExprType< Expr<T> >::type return_type;
        const typename Expr<T>::derived_type& val = x.derived();

        auto scope = desul::MemoryScopeDevice();

        if (atomics_use_team()) {
          int go = 1;
          while (go) {
            if (threadIdx.x == 0)
              go = !desul::Impl::lock_address_cuda((void*)dest_val, scope);
            go = Kokkos::shfl(go, 0, blockDim.x);
          }
          desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
          return_type return_val = op.apply(*dest, val);
          *dest = return_val;
          desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
          if (threadIdx.x == 0)
            desul::Impl::unlock_address_cuda((void*)dest_val, scope);
          return return_val;
        }
        else {
          return_type return_val;
          // This is a way to avoid dead lock in a warp
          int done = 0;
          unsigned int mask = __activemask();
          unsigned int active = __ballot_sync(mask, 1);
          unsigned int done_active = 0;
          while (active != done_active) {
            if (!done) {
              if (desul::Impl::lock_address_cuda((void*)dest_val, scope)) {
                desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
                return_val = op.apply(*dest, val);
                *dest = return_val;
                desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
                desul::Impl::unlock_address_cuda((void*)dest_val, scope);
                done = 1;
              }
            }
            done_active = __ballot_sync(mask, done);
          }
          return return_val;
        }
      }
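
      // Added clarification of the "avoid dead lock in a warp" comment above
      // (the same pattern is used in atomic_fetch_oper_device() below): under
      // SIMT execution, a thread that fails to acquire the lock cannot simply
      // spin, because the lock holder in the same warp might never be
      // scheduled to reach its unlock.  Instead, every thread retries until
      // __ballot_sync() reports that all initially active threads have
      // completed their update.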

      template <typename Oper, typename DestPtrT, typename ValT, typename T>
      __device__
      typename Sacado::BaseExprType< Expr<T> >::type
      atomic_fetch_oper_device(const Oper& op, DestPtrT dest, ValT* dest_val,
                               const Expr<T>& x)
      {
        typedef typename Sacado::BaseExprType< Expr<T> >::type return_type;
        const typename Expr<T>::derived_type& val = x.derived();

        auto scope = desul::MemoryScopeDevice();

        if (atomics_use_team()) {
          int go = 1;
          while (go) {
            if (threadIdx.x == 0)
              go = !desul::Impl::lock_address_cuda((void*)dest_val, scope);
            go = Kokkos::shfl(go, 0, blockDim.x);
          }
          desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
          return_type return_val = *dest;
          *dest = op.apply(return_val, val);
          desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
          if (threadIdx.x == 0)
            desul::Impl::unlock_address_cuda((void*)dest_val, scope);
          return return_val;
        }
        else {
          return_type return_val;
          // This is a way to (hopefully) avoid dead lock in a warp
          int done = 0;
          unsigned int mask = __activemask();
          unsigned int active = __ballot_sync(mask, 1);
          unsigned int done_active = 0;
          while (active != done_active) {
            if (!done) {
              if (desul::Impl::lock_address_cuda((void*)dest_val, scope)) {
                desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
                return_val = *dest;
                *dest = op.apply(return_val, val);
                desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
                desul::Impl::unlock_address_cuda((void*)dest_val, scope);
                done = 1;
              }
            }
            done_active = __ballot_sync(mask, done);
          }
          return return_val;
        }
      }

#elif defined(KOKKOS_ENABLE_HIP)

      // Our implementation of Kokkos::atomic_oper_fetch() and
      // Kokkos::atomic_fetch_oper() for Sacado types on device
      template <typename Oper, typename DestPtrT, typename ValT, typename T>
      __device__
      typename Sacado::BaseExprType< Expr<T> >::type
      atomic_oper_fetch_device(const Oper& op, DestPtrT dest, ValT* dest_val,
                               const Expr<T>& x)
      {
        typedef typename Sacado::BaseExprType< Expr<T> >::type return_type;
        const typename Expr<T>::derived_type& val = x.derived();

        auto scope = desul::MemoryScopeDevice();

        if (atomics_use_team()) {
          int go = 1;
          while (go) {
            if (threadIdx.x == 0)
              go = !desul::Impl::lock_address_hip((void*)dest_val, scope);
            go = Kokkos::Experimental::shfl(go, 0, blockDim.x);
          }
          desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
          return_type return_val = op.apply(*dest, val);
          *dest = return_val;
          desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
          if (threadIdx.x == 0)
            desul::Impl::unlock_address_hip((void*)dest_val, scope);
          return return_val;
        }
        else {
          return_type return_val;
          int done = 0;
          unsigned int active = __ballot(1);
          unsigned int done_active = 0;
          while (active != done_active) {
            if (!done) {
              if (desul::Impl::lock_address_hip((void*)dest_val, scope)) {
                return_val = op.apply(*dest, val);
                *dest = return_val;
                desul::Impl::unlock_address_hip((void*)dest_val, scope);
                done = 1;
              }
            }
            done_active = __ballot(done);
          }
          return return_val;
        }
      }

      template <typename Oper, typename DestPtrT, typename ValT, typename T>
      __device__
      typename Sacado::BaseExprType< Expr<T> >::type
      atomic_fetch_oper_device(const Oper& op, DestPtrT dest, ValT* dest_val,
                               const Expr<T>& x)
      {
        typedef typename Sacado::BaseExprType< Expr<T> >::type return_type;
        const typename Expr<T>::derived_type& val = x.derived();

        auto scope = desul::MemoryScopeDevice();

        if (atomics_use_team()) {
          int go = 1;
          while (go) {
            if (threadIdx.x == 0)
              go = !desul::Impl::lock_address_hip((void*)dest_val, scope);
            go = Kokkos::Experimental::shfl(go, 0, blockDim.x);
          }
          desul::atomic_thread_fence(desul::MemoryOrderAcquire(), scope);
          return_type return_val = *dest;
          *dest = op.apply(return_val, val);
          desul::atomic_thread_fence(desul::MemoryOrderRelease(), scope);
          if (threadIdx.x == 0)
            desul::Impl::unlock_address_hip((void*)dest_val, scope);
          return return_val;
        }
        else {
          return_type return_val;
          int done = 0;
          unsigned int active = __ballot(1);
          unsigned int done_active = 0;
          while (active != done_active) {
            if (!done) {
              if (desul::Impl::lock_address_hip((void*)dest_val, scope)) {
                return_val = *dest;
                *dest = op.apply(return_val, val);
                desul::Impl::unlock_address_hip((void*)dest_val, scope);
                done = 1;
              }
            }
            done_active = __ballot(done);
          }
          return return_val;
        }
      }

#endif

      // Overloads of Kokkos::atomic_oper_fetch/Kokkos::atomic_fetch_oper
      // for Sacado types
      template <typename Oper, typename S>
      SACADO_INLINE_FUNCTION GeneralFad<S>
      atomic_oper_fetch(const Oper& op, GeneralFad<S>* dest,
                        const GeneralFad<S>& val)
      {
        KOKKOS_IF_ON_HOST(return Impl::atomic_oper_fetch_host(op, dest, &(dest->val()), val);)
        KOKKOS_IF_ON_DEVICE(return Impl::atomic_oper_fetch_device(op, dest, &(dest->val()), val);)
      }
      template <typename Oper, typename ValT, unsigned sl, unsigned ss,
                typename U, typename T>
      SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
      atomic_oper_fetch(const Oper& op, ViewFadPtr<ValT,sl,ss,U> dest,
                        const Expr<T>& val)
      {
        KOKKOS_IF_ON_HOST(return Impl::atomic_oper_fetch_host(op, dest, &dest.val(), val);)
        KOKKOS_IF_ON_DEVICE(return Impl::atomic_oper_fetch_device(op, dest, &dest.val(), val);)
      }

      template <typename Oper, typename S>
      SACADO_INLINE_FUNCTION GeneralFad<S>
      atomic_fetch_oper(const Oper& op, GeneralFad<S>* dest,
                        const GeneralFad<S>& val)
      {
        KOKKOS_IF_ON_HOST(return Impl::atomic_fetch_oper_host(op, dest, &(dest->val()), val);)
        KOKKOS_IF_ON_DEVICE(return Impl::atomic_fetch_oper_device(op, dest, &(dest->val()), val);)
      }
      template <typename Oper, typename ValT, unsigned sl, unsigned ss,
                typename U, typename T>
      SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
      atomic_fetch_oper(const Oper& op, ViewFadPtr<ValT,sl,ss,U> dest,
                        const Expr<T>& val)
      {
        KOKKOS_IF_ON_HOST(return Impl::atomic_fetch_oper_host(op, dest, &dest.val(), val);)
        KOKKOS_IF_ON_DEVICE(return Impl::atomic_fetch_oper_device(op, dest, &dest.val(), val);)
      }

      // Our definitions of the various Oper classes, made more type-flexible
      struct MaxOper {
        template <class Scalar1, class Scalar2>
        KOKKOS_FORCEINLINE_FUNCTION
        static auto apply(const Scalar1& val1, const Scalar2& val2)
          -> decltype(max(val1,val2))
        {
          return max(val1,val2);
        }
      };
      struct MinOper {
        template <class Scalar1, class Scalar2>
        KOKKOS_FORCEINLINE_FUNCTION
        static auto apply(const Scalar1& val1, const Scalar2& val2)
          -> decltype(min(val1,val2))
        {
          return min(val1,val2);
        }
      };
      struct AddOper {
        template <class Scalar1, class Scalar2>
        KOKKOS_FORCEINLINE_FUNCTION
        static auto apply(const Scalar1& val1, const Scalar2& val2)
          -> decltype(val1+val2)
        {
          return val1 + val2;
        }
      };
      struct SubOper {
        template <class Scalar1, class Scalar2>
        KOKKOS_FORCEINLINE_FUNCTION
        static auto apply(const Scalar1& val1, const Scalar2& val2)
          -> decltype(val1-val2)
        {
          return val1 - val2;
        }
      };
      struct MulOper {
        template <class Scalar1, class Scalar2>
        KOKKOS_FORCEINLINE_FUNCTION
        static auto apply(const Scalar1& val1, const Scalar2& val2)
          -> decltype(val1*val2)
        {
          return val1 * val2;
        }
      };
      struct DivOper {
        template <class Scalar1, class Scalar2>
        KOKKOS_FORCEINLINE_FUNCTION
        static auto apply(const Scalar1& val1, const Scalar2& val2)
          -> decltype(val1/val2)
        {
          return val1 / val2;
        }
      };

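      // Added note: unlike the fixed-type operator wrappers used by the
      // generic Kokkos/desul atomics, apply() above deduces its result type
      // from the mixed-type expression (e.g. Fad op scalar), and max()/min()
      // resolve through argument-dependent lookup to Sacado's overloads for
      // Fad expressions.
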
    } // Impl

    // Overloads of Kokkos::atomic_*_fetch() and Kokkos::atomic_fetch_*()
    // for Sacado types
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_max_fetch(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_oper_fetch(Impl::MaxOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_max_fetch(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_oper_fetch(Impl::MaxOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_min_fetch(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_oper_fetch(Impl::MinOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_min_fetch(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_oper_fetch(Impl::MinOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_add_fetch(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_oper_fetch(Impl::AddOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_add_fetch(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_oper_fetch(Impl::AddOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_sub_fetch(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_oper_fetch(Impl::SubOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_sub_fetch(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_oper_fetch(Impl::SubOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_mul_fetch(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_oper_fetch(Impl::MulOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_mul_fetch(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_oper_fetch(Impl::MulOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_div_fetch(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_oper_fetch(Impl::DivOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_div_fetch(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_oper_fetch(Impl::DivOper(), dest, val);
    }

    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_fetch_max(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_fetch_oper(Impl::MaxOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_fetch_max(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_fetch_oper(Impl::MaxOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_fetch_min(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_fetch_oper(Impl::MinOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_fetch_min(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_fetch_oper(Impl::MinOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_fetch_add(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_fetch_oper(Impl::AddOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_fetch_add(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_fetch_oper(Impl::AddOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_fetch_sub(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_fetch_oper(Impl::SubOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_fetch_sub(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_fetch_oper(Impl::SubOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_fetch_mul(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_fetch_oper(Impl::MulOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_fetch_mul(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_fetch_oper(Impl::MulOper(), dest, val);
    }
    template <typename S>
    SACADO_INLINE_FUNCTION GeneralFad<S>
    atomic_fetch_div(GeneralFad<S>* dest, const GeneralFad<S>& val) {
      return Impl::atomic_fetch_oper(Impl::DivOper(), dest, val);
    }
    template <typename ValT, unsigned sl, unsigned ss, typename U, typename T>
    SACADO_INLINE_FUNCTION typename Sacado::BaseExprType< Expr<T> >::type
    atomic_fetch_div(ViewFadPtr<ValT,sl,ss,U> dest, const Expr<T>& val) {
      return Impl::atomic_fetch_oper(Impl::DivOper(), dest, val);
    }
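
    // Added usage sketch (illustrative, not part of the original source); the
    // names v, x, N, M, and p below are hypothetical.  The *_fetch overloads
    // return the updated entry, the fetch_* overloads return the entry as it
    // was before the update; in both cases the value and all derivative
    // components are updated under a single lock:
    //
    //   using FadType = Sacado::Fad::Exp::DFad<double>;
    //   Kokkos::View<FadType*> v("v", N, p+1);
    //   Kokkos::parallel_for(M, KOKKOS_LAMBDA(const int i) {
    //     FadType updated  = Sacado::Fad::Exp::atomic_max_fetch(&v(0), x(i));
    //     FadType previous = Sacado::Fad::Exp::atomic_fetch_add(&v(1), x(i));
    //   });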

  } // namespace Exp
  } // namespace Fad

} // namespace Sacado

#endif // HAVE_SACADO_KOKKOSCORE
#endif // SACADO_FAD_EXP_ATOMIC_HPP