MPAssembly/fenl_functors.hpp
1 /*
2 //@HEADER
3 // ************************************************************************
4 //
5 // Kokkos: Manycore Performance-Portable Multidimensional Arrays
6 // Copyright (2012) Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov)
39 //
40 // ************************************************************************
41 //@HEADER
42 */
43 
44 #ifndef KOKKOS_EXAMPLE_FENLFUNCTORS_HPP
45 #define KOKKOS_EXAMPLE_FENLFUNCTORS_HPP
46 
47 #include <stdio.h>
48 
49 #include <iostream>
50 #include <fstream>
51 #include <iomanip>
52 #include <cstdlib>
53 #include <cmath>
54 #include <limits>
55 
56 #include <Kokkos_Pair.hpp>
57 #include <Kokkos_UnorderedMap.hpp>
58 
59 #include <impl/Kokkos_Timer.hpp>
60 
61 #include <BoxElemFixture.hpp>
62 #include <HexElement.hpp>
63 
64 #include <Stokhos_KL_ExponentialRandomField.hpp>
65 
66 //----------------------------------------------------------------------------
67 //----------------------------------------------------------------------------
68 
69 namespace Kokkos {
70 namespace Example {
71 namespace FENL {
72 
73 struct DeviceConfig {
74  struct Dim3 {
75  size_t x, y, z;
76  Dim3(const size_t x_, const size_t y_ = 1, const size_t z_ = 1) :
77  x(x_), y(y_), z(z_) {}
78  };
79 
80  Dim3 block_dim;
81  size_t num_blocks;
82  size_t num_threads_per_block;
83 
84  DeviceConfig(const size_t num_blocks_ = 0,
85  const size_t threads_per_block_x_ = 0,
86  const size_t threads_per_block_y_ = 0,
87  const size_t threads_per_block_z_ = 1) :
88  block_dim(threads_per_block_x_,threads_per_block_y_,threads_per_block_z_),
89  num_blocks(num_blocks_),
90  num_threads_per_block(threads_per_block_x_*threads_per_block_y_*threads_per_block_z_)
91  {}
92 };
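
// Usage sketch (illustrative only; 'nelem' and 'execution_space' below are
// assumed to come from the surrounding application): a DeviceConfig describing
// a CUDA-style launch with a 16-wide ensemble in x and 4 elements per block in
// y, and how the apply() methods below turn it into a TeamPolicy:
//
//   Kokkos::Example::FENL::DeviceConfig cfg( 0 /* num_blocks */ , 16 , 4 );
//   const size_t team_size   = cfg.block_dim.x * cfg.block_dim.y;            // 64
//   const size_t league_size = ( nelem + cfg.block_dim.y - 1 ) / cfg.block_dim.y;
//   Kokkos::TeamPolicy< execution_space > policy( league_size , team_size );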
93 
94 template< typename ValueType , class Space >
95 struct CrsMatrix {
96  typedef Kokkos::StaticCrsGraph< unsigned , Space , void , unsigned > StaticCrsGraphType ;
97  typedef View< ValueType * , Space > values_type ;
98 
99  StaticCrsGraphType graph ;
100  values_type values ;
101 
102  CrsMatrix() : graph(), values() {}
103 
104  CrsMatrix( const StaticCrsGraphType & arg_graph )
105  : graph( arg_graph )
106  , values( "crs_matrix_values" , arg_graph.entries.dimension_0() )
107  {}
108 };
109 
110 // Traits class for creating strided local views for embedded ensemble-based scalar types,
111 // specialized for the ensemble UQ scalar type
112 template <typename ViewType, typename Enabled = void>
113 struct LocalViewTraits {
114  typedef ViewType view_type;
115  // typedef Kokkos::View<typename view_type::data_type,
116  // typename view_type::array_layout,
117  // typename view_type::execution_space,
118  // Kokkos::MemoryUnmanaged> local_view_type;
119  typedef const view_type& local_view_type;
120  typedef typename view_type::value_type local_value_type;
121  static const bool use_team = false;
122  KOKKOS_INLINE_FUNCTION
123  static local_view_type create_local_view( const view_type& v,
124  const unsigned local_rank)
125  { return v; }
126 };
127 
128 #if defined( KOKKOS_HAVE_CUDA )
129 
130 template <typename ViewType>
131 struct LocalViewTraits<
132  ViewType,
133  typename std::enable_if< std::is_same<typename ViewType::execution_space,
134  Kokkos::Cuda>::value &&
135  Kokkos::is_view_mp_vector<ViewType>::value
136  >::type > {
137  typedef ViewType view_type;
138  typedef typename Kokkos::LocalMPVectorView<view_type,1>::type local_view_type;
139  typedef typename local_view_type::value_type local_value_type;
140  static const bool use_team = true;
141 
142  KOKKOS_INLINE_FUNCTION
143  static local_view_type create_local_view( const view_type& v,
144  const unsigned local_rank)
145  {
146  return Kokkos::partition<1>(v, local_rank);
147  }
148 };
149 
150 #endif /* #if defined( KOKKOS_HAVE_CUDA ) */
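
// How these traits are used later in this file (illustrative sketch): each
// thread asks for a "local" view of an ensemble-valued Kokkos view holding
// only the component owned by its ensemble rank.
//
//   typedef LocalViewTraits< view_type > traits;
//   typename traits::local_view_type lv =
//     traits::create_local_view( v , ensemble_rank );
//
// The generic case above hands back the full view unchanged (use_team ==
// false); the Cuda / MP::Vector specialization returns
// Kokkos::partition<1>( v , ensemble_rank ) and sets use_team == true so
// that callers switch to a TeamPolicy launch.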
151 
152 // Compute DeviceConfig structs based on scalar type
153 template <typename ScalarType>
154 struct CreateDeviceConfigs {
155  static void eval( Kokkos::Example::FENL::DeviceConfig& dev_config_elem,
156  Kokkos::Example::FENL::DeviceConfig& dev_config_bc ) {
157  dev_config_elem = Kokkos::Example::FENL::DeviceConfig( 0 , 1 , 1 );
158  dev_config_bc = Kokkos::Example::FENL::DeviceConfig( 0 , 1 , 1 );
159  }
160 };
161 
162 // Compute DeviceConfig structs based on scalar type
163 template <typename StorageType>
164 struct CreateDeviceConfigs< Sacado::MP::Vector<StorageType> > {
165  typedef typename StorageType::execution_space execution_space ;
166  static void eval( Kokkos::Example::FENL::DeviceConfig& dev_config_elem,
167  Kokkos::Example::FENL::DeviceConfig& dev_config_bc ) {
168  static const unsigned VectorSize = StorageType::static_size;
169 #if defined( KOKKOS_HAVE_CUDA )
170  enum { is_cuda = Kokkos::Impl::is_same< execution_space, Kokkos::Cuda >::value };
171 #else
172  enum { is_cuda = false };
173 #endif /* #if defined( KOKKOS_HAVE_CUDA ) */
174  if ( is_cuda ) {
175  dev_config_elem = Kokkos::Example::FENL::DeviceConfig( 0 , VectorSize , 64/VectorSize );
176  dev_config_bc = Kokkos::Example::FENL::DeviceConfig( 0 , VectorSize , 256/VectorSize );
177  }
178  else {
179  dev_config_elem = Kokkos::Example::FENL::DeviceConfig( 0 , 1 , 1 );
180  dev_config_bc = Kokkos::Example::FENL::DeviceConfig( 0 , 1 , 1 );
181  }
182  }
183 };
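
// Resulting configurations (illustrative; assumes a Cuda build with an
// ensemble/vector size of 16):
//
//   Kokkos::Example::FENL::DeviceConfig cfg_elem, cfg_bc;
//   CreateDeviceConfigs< Sacado::MP::Vector<StorageType> >::eval( cfg_elem , cfg_bc );
//   // cfg_elem.block_dim = {16,  4, 1}  ->  64 threads per block
//   // cfg_bc.block_dim   = {16, 16, 1}  -> 256 threads per block
//
// On non-Cuda builds both configurations degenerate to 1x1x1 blocks.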
184 
185 template< class ElemNodeIdView , class CrsGraphType , unsigned ElemNode >
186 class NodeNodeGraph {
187 public:
188 
189  typedef typename ElemNodeIdView::execution_space execution_space ;
190  typedef pair<unsigned,unsigned> key_type ;
191 
192  typedef Kokkos::UnorderedMap< key_type, void , execution_space > SetType ;
193  typedef typename CrsGraphType::row_map_type::non_const_type RowMapType ;
194  typedef Kokkos::View< unsigned , execution_space > UnsignedValue ;
195 
196  // Static dimensions of 0 generate compiler warnings or errors.
197  typedef Kokkos::View< unsigned*[ElemNode][ElemNode] , execution_space >
198  ElemGraphType ;
199 
200 private:
201 
202  enum PhaseType { FILL_NODE_SET ,
203  SCAN_NODE_COUNT ,
204  FILL_GRAPH_ENTRIES ,
205  SORT_GRAPH_ENTRIES ,
206  FILL_ELEMENT_GRAPH };
207 
208  const unsigned node_count ;
209  const ElemNodeIdView elem_node_id ;
210  UnsignedValue row_total ;
211  RowMapType row_count ;
212  RowMapType row_map ;
213  SetType node_node_set ;
214  PhaseType phase ;
215 
216 public:
217 
218  CrsGraphType graph ;
219  ElemGraphType elem_graph ;
220 
221  struct Times
222  {
223  double ratio;
224  double fill_node_set;
225  double scan_node_count;
226  double fill_graph_entries;
227  double sort_graph_entries;
228  double fill_element_graph;
229  };
230 
231  NodeNodeGraph( const ElemNodeIdView & arg_elem_node_id ,
232  const unsigned arg_node_count,
233  Times & results
234  )
235  : node_count(arg_node_count)
236  , elem_node_id( arg_elem_node_id )
237  , row_total( "row_total" )
238  , row_count(Kokkos::ViewAllocateWithoutInitializing("row_count") , node_count ) // will deep_copy to 0 inside loop
239  , row_map( "graph_row_map" , node_count + 1 )
240  , node_node_set()
241  , phase( FILL_NODE_SET )
242  , graph()
243  , elem_graph()
244  {
245  //--------------------------------
246  // Guess at capacity required for the map:
247 
248  Kokkos::Impl::Timer wall_clock ;
249 
250  wall_clock.reset();
251  phase = FILL_NODE_SET ;
252 
253  // upper bound on the capacity
254  size_t set_capacity = (((28ull * node_count) / 2ull)*4ull)/3ull;
255 
256 
257  // Increase capacity until the (node,node) map is successfully filled.
258  {
259  // Zero the row count to restart the fill
260  Kokkos::deep_copy( row_count , 0u );
261 
262  node_node_set = SetType( set_capacity );
263 
264  // May be larger than requested:
265  set_capacity = node_node_set.capacity();
266 
267  Kokkos::parallel_for( elem_node_id.dimension_0() , *this );
268  }
269 
270  execution_space::fence();
271  results.ratio = (double)node_node_set.size() / (double)node_node_set.capacity();
272  results.fill_node_set = wall_clock.seconds();
273  //--------------------------------
274 
275  wall_clock.reset();
276  phase = SCAN_NODE_COUNT ;
277 
278  // Exclusive scan of row_count into row_map
279  // including the final total in the 'node_count + 1' position.
280  // Zero the 'row_count' values.
281  Kokkos::parallel_scan( node_count , *this );
282 
283  // Zero the row count for the fill:
284  Kokkos::deep_copy( row_count , 0u );
285 
286  unsigned graph_entry_count = 0 ;
287 
288  Kokkos::deep_copy( graph_entry_count , row_total );
289 
290  // Assign graph's row_map and allocate graph's entries
291  graph.row_map = row_map ;
292  graph.entries = typename CrsGraphType::entries_type( "graph_entries" , graph_entry_count );
293 
294  //--------------------------------
295  // Fill graph's entries from the (node,node) set.
296 
297  execution_space::fence();
298  results.scan_node_count = wall_clock.seconds();
299 
300  wall_clock.reset();
301  phase = FILL_GRAPH_ENTRIES ;
302  Kokkos::parallel_for( node_node_set.capacity() , *this );
303 
304  execution_space::fence();
305  results.fill_graph_entries = wall_clock.seconds();
306 
307  //--------------------------------
308  // Done with the temporary sets and arrays
309  wall_clock.reset();
310  phase = SORT_GRAPH_ENTRIES ;
311 
312  row_total = UnsignedValue();
313  row_count = RowMapType();
314  row_map = RowMapType();
315  node_node_set.clear();
316 
317  //--------------------------------
318 
319  Kokkos::parallel_for( node_count , *this );
320 
321  execution_space::fence();
322  results.sort_graph_entries = wall_clock.seconds();
323 
324  //--------------------------------
325  // Element-to-graph mapping:
326  wall_clock.reset();
327  phase = FILL_ELEMENT_GRAPH ;
328  elem_graph = ElemGraphType("elem_graph", elem_node_id.dimension_0() );
329  Kokkos::parallel_for( elem_node_id.dimension_0() , *this );
330 
331  execution_space::fence();
332  results.fill_element_graph = wall_clock.seconds();
333  }
334 
335  //------------------------------------
336  // parallel_for: create map and count row length
337 
338  KOKKOS_INLINE_FUNCTION
339  void fill_set( const unsigned ielem ) const
340  {
341  // Loop over element's (row_local_node,col_local_node) pairs:
342  for ( unsigned row_local_node = 0 ; row_local_node < elem_node_id.dimension_1() ; ++row_local_node ) {
343 
344  const unsigned row_node = elem_node_id( ielem , row_local_node );
345 
346  for ( unsigned col_local_node = row_local_node ; col_local_node < elem_node_id.dimension_1() ; ++col_local_node ) {
347 
348  const unsigned col_node = elem_node_id( ielem , col_local_node );
349 
350  // If either node is locally owned then insert the pair into the unordered map:
351 
352  if ( row_node < row_count.dimension_0() || col_node < row_count.dimension_0() ) {
353 
354  const key_type key = (row_node < col_node) ? make_pair( row_node, col_node ) : make_pair( col_node, row_node ) ;
355 
356  const typename SetType::insert_result result = node_node_set.insert( key );
357 
358  if ( result.success() ) {
359  if ( row_node < row_count.dimension_0() ) { atomic_fetch_add( & row_count( row_node ) , 1 ); }
360  if ( col_node < row_count.dimension_0() && col_node != row_node ) { atomic_fetch_add( & row_count( col_node ) , 1 ); }
361  }
362  }
363  }
364  }
365  }
366 
367  KOKKOS_INLINE_FUNCTION
368  void fill_graph_entries( const unsigned iset ) const
369  {
370  if ( node_node_set.valid_at(iset) ) {
371  const key_type key = node_node_set.key_at(iset) ;
372  const unsigned row_node = key.first ;
373  const unsigned col_node = key.second ;
374 
375  if ( row_node < row_count.dimension_0() ) {
376  const unsigned offset = graph.row_map( row_node ) + atomic_fetch_add( & row_count( row_node ) , 1 );
377  graph.entries( offset ) = col_node ;
378  }
379 
380  if ( col_node < row_count.dimension_0() && col_node != row_node ) {
381  const unsigned offset = graph.row_map( col_node ) + atomic_fetch_add( & row_count( col_node ) , 1 );
382  graph.entries( offset ) = row_node ;
383  }
384  }
385  }
386 
387  KOKKOS_INLINE_FUNCTION
388  void sort_graph_entries( const unsigned irow ) const
389  {
390  typedef typename CrsGraphType::size_type size_type;
391  const size_type row_beg = graph.row_map( irow );
392  const size_type row_end = graph.row_map( irow + 1 );
393  for ( size_type i = row_beg + 1 ; i < row_end ; ++i ) {
394  const typename CrsGraphType::data_type col = graph.entries(i);
395  size_type j = i ;
396  for ( ; row_beg < j && col < graph.entries(j-1) ; --j ) {
397  graph.entries(j) = graph.entries(j-1);
398  }
399  graph.entries(j) = col ;
400  }
401  }
402 
403  KOKKOS_INLINE_FUNCTION
404  void fill_elem_graph_map( const unsigned ielem ) const
405  {
406  typedef typename CrsGraphType::data_type entry_type;
407  for ( unsigned row_local_node = 0 ; row_local_node < elem_node_id.dimension_1() ; ++row_local_node ) {
408 
409  const unsigned row_node = elem_node_id( ielem , row_local_node );
410 
411  for ( unsigned col_local_node = 0 ; col_local_node < elem_node_id.dimension_1() ; ++col_local_node ) {
412 
413  const unsigned col_node = elem_node_id( ielem , col_local_node );
414 
415  entry_type entry = 0 ;
416 
417  if ( row_node + 1 < graph.row_map.dimension_0() ) {
418 
419  const entry_type entry_end = static_cast<entry_type> (graph.row_map( row_node + 1 ));
420 
421  entry = graph.row_map( row_node );
422 
423  for ( ; entry < entry_end && graph.entries(entry) != static_cast<entry_type> (col_node) ; ++entry );
424 
425  if ( entry == entry_end ) entry = ~0u ;
426  }
427 
428  elem_graph( ielem , row_local_node , col_local_node ) = entry ;
429  }
430  }
431  }
432 
433  KOKKOS_INLINE_FUNCTION
434  void operator()( const unsigned iwork ) const
435  {
436  if ( phase == FILL_NODE_SET ) {
437  fill_set( iwork );
438  }
439  else if ( phase == FILL_GRAPH_ENTRIES ) {
440  fill_graph_entries( iwork );
441  }
442  else if ( phase == SORT_GRAPH_ENTRIES ) {
443  sort_graph_entries( iwork );
444  }
445  else if ( phase == FILL_ELEMENT_GRAPH ) {
446  fill_elem_graph_map( iwork );
447  }
448  }
449 
450  //------------------------------------
451  // parallel_scan: row offsets
452 
453  typedef unsigned value_type ;
454 
455  KOKKOS_INLINE_FUNCTION
456  void operator()( const unsigned irow , unsigned & update , const bool final ) const
457  {
458  // exclusive scan
459  if ( final ) { row_map( irow ) = update ; }
460 
461  update += row_count( irow );
462 
463  if ( final ) {
464  if ( irow + 1 == row_count.dimension_0() ) {
465  row_map( irow + 1 ) = update ;
466  row_total() = update ;
467  }
468  }
469  }
470 
471  KOKKOS_INLINE_FUNCTION
472  void init( unsigned & update ) const { update = 0 ; }
473 
474  KOKKOS_INLINE_FUNCTION
475  void join( volatile unsigned & update , const volatile unsigned & input ) const { update += input ; }
476 
477  //------------------------------------
478 };
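
// Typical use of NodeNodeGraph (illustrative sketch; 'fixture', 'matrix_type',
// 'elem_node_type' and 'ElemNodeCount' are hypothetical stand-ins for the
// fixture and matrix types used by the element computation below):
//
//   typedef NodeNodeGraph< elem_node_type ,
//                          typename matrix_type::StaticCrsGraphType ,
//                          ElemNodeCount > graph_factory ;
//   graph_factory::Times graph_times ;
//   graph_factory mesh_to_graph( fixture.elem_node() ,
//                                fixture.node_count_owned() , graph_times );
//   matrix_type jacobian( mesh_to_graph.graph );
//   // mesh_to_graph.elem_graph maps (element, row node, col node) to the
//   // matrix entry offset consumed by ElementComputation.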
479 
480 } /* namespace FENL */
481 } /* namespace Example */
482 } /* namespace Kokkos */
483 
484 //----------------------------------------------------------------------------
485 //----------------------------------------------------------------------------
486 
487 namespace Kokkos {
488 namespace Example {
489 namespace FENL {
490 
491 struct ElementComputationConstantCoefficient {
492  enum { is_constant = false };
493 
494  const double coeff_k ;
495 
496  KOKKOS_INLINE_FUNCTION
497  double operator()( double pt[], unsigned ensemble_rank) const
498  { return coeff_k * std::sin(pt[0]) * std::sin(pt[1]) * std::sin(pt[2]); }
499 
500  ElementComputationConstantCoefficient( const double val )
501  : coeff_k( val ) {}
502 
503  ElementComputationConstantCoefficient( const ElementComputationConstantCoefficient & rhs )
504  : coeff_k( rhs.coeff_k ) {}
505 };
506 
507 // Exponential KL from Stokhos
508 template < typename Scalar, typename MeshScalar, typename Device >
509 class ExponentialKLCoefficient {
510 public:
511 
512  // Turn into a meta-function class usable with Sacado::mpl::apply
513  template <typename T1, typename T2 = MeshScalar, typename T3 = Device>
514  struct apply {
515  typedef ExponentialKLCoefficient<T1,T2,T3> type;
516  };
517 
518  enum { is_constant = false };
519  typedef Kokkos::View<Scalar*, Kokkos::LayoutLeft, Device> RandomVariableView;
520  typedef typename RandomVariableView::size_type size_type;
521 
522  typedef LocalViewTraits< RandomVariableView > local_rv_view_traits;
523  typedef typename local_rv_view_traits::local_view_type local_rv_view_type;
524  typedef typename local_rv_view_traits::local_value_type local_scalar_type;
525  typedef Stokhos::KL::ExponentialRandomField<MeshScalar, Device> rf_type;
526 
527  rf_type m_rf; // Exponential random field
528  const MeshScalar m_mean; // Mean of random field
529  const MeshScalar m_variance; // Variance of random field
530  const MeshScalar m_corr_len; // Correlation length of random field
531  const size_type m_num_rv; // Number of random variables
532  RandomVariableView m_rv; // KL random variables
533 
534 public:
535 
536  ExponentialKLCoefficient(
537  const MeshScalar mean ,
538  const MeshScalar variance ,
539  const MeshScalar correlation_length ,
540  const size_type num_rv ) :
541  m_mean( mean ),
542  m_variance( variance ),
543  m_corr_len( correlation_length ),
544  m_num_rv( num_rv ),
545  m_rv( "KL Random Variables", m_num_rv )
546  {
547  Teuchos::ParameterList solverParams;
548  solverParams.set("Number of KL Terms", int(num_rv));
549  solverParams.set("Mean", mean);
550  solverParams.set("Standard Deviation", std::sqrt(variance));
551  int ndim = 3;
552  Teuchos::Array<double> domain_upper(ndim, 1.0), domain_lower(ndim, 0.0),
553  correlation_lengths(ndim, correlation_length);
554  solverParams.set("Domain Upper Bounds", domain_upper);
555  solverParams.set("Domain Lower Bounds", domain_lower);
556  solverParams.set("Correlation Lengths", correlation_lengths);
557 
558  m_rf = rf_type(solverParams);
559  }
560 
561  ExponentialKLCoefficient( const ExponentialKLCoefficient & rhs ) :
562  m_rf( rhs.m_rf ) ,
563  m_mean( rhs.m_mean ) ,
564  m_variance( rhs.m_variance ) ,
565  m_corr_len( rhs.m_corr_len ) ,
566  m_num_rv( rhs.m_num_rv ) ,
567  m_rv( rhs.m_rv ) {}
568 
569  KOKKOS_INLINE_FUNCTION
570  void setRandomVariables( const RandomVariableView& rv) { m_rv = rv; }
571 
572  KOKKOS_INLINE_FUNCTION
573  RandomVariableView getRandomVariables() const { return m_rv; }
574 
575  KOKKOS_INLINE_FUNCTION
576  local_scalar_type operator() ( const MeshScalar point[],
577  const size_type ensemble_rank ) const
578  {
579  local_rv_view_type local_rv =
580  local_rv_view_traits::create_local_view(m_rv, ensemble_rank);
581 
582  local_scalar_type val = m_rf.evaluate(point, local_rv);
583 
584  return val;
585  }
586 };
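
// Construction sketch (illustrative values only):
//
//   typedef ExponentialKLCoefficient< Scalar , double , Device > coeff_type ;
//   coeff_type diffusion( 1.0  /* mean */ ,
//                         0.1  /* variance */ ,
//                         0.25 /* correlation length */ ,
//                         5    /* number of KL random variables */ );
//   // diffusion.setRandomVariables( rv );            // one sample of the KL variables
//   // local_scalar_type k = diffusion( pt , rank );  // evaluate at a physical point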
587 
588 template< class FiniteElementMeshType , class SparseMatrixType
589  , class CoeffFunctionType = ElementComputationConstantCoefficient
590  >
591 class ElementComputation ;
592 
593 
594 template< class ExecutionSpace , BoxElemPart::ElemOrder Order , class CoordinateMap ,
595  typename ScalarType , class CoeffFunctionType >
596  class ElementComputation
597  < Kokkos::Example::BoxElemFixture< ExecutionSpace , Order , CoordinateMap >
598  , Kokkos::Example::FENL::CrsMatrix< ScalarType , ExecutionSpace >
599  , CoeffFunctionType >
600 {
601 public:
602 
603  typedef Kokkos::Example::BoxElemFixture< ExecutionSpace , Order , CoordinateMap > mesh_type ;
604  typedef Kokkos::Example::HexElement_Data< mesh_type::ElemNode > element_data_type ;
605 
606  //------------------------------------
607 
608  typedef ExecutionSpace execution_space ;
609  typedef ScalarType scalar_type ;
610 
611  typedef Kokkos::Example::FENL::CrsMatrix< ScalarType , ExecutionSpace > sparse_matrix_type ;
612  typedef typename sparse_matrix_type::StaticCrsGraphType sparse_graph_type ;
613  typedef typename sparse_matrix_type::values_type matrix_values_type ;
614  typedef Kokkos::View< scalar_type* , Kokkos::LayoutLeft, execution_space > vector_type ;
615 
616  //------------------------------------
617 
618  typedef LocalViewTraits< vector_type > local_vector_view_traits;
619  typedef LocalViewTraits< matrix_values_type > local_matrix_view_traits;
620  typedef typename local_vector_view_traits::local_view_type local_vector_type;
621  typedef typename local_matrix_view_traits::local_view_type local_matrix_type;
622  typedef typename local_vector_view_traits::local_value_type local_scalar_type;
623  static const bool use_team = local_vector_view_traits::use_team;
624 
625  static const unsigned SpatialDim = element_data_type::spatial_dimension ;
626  static const unsigned TensorDim = SpatialDim * SpatialDim ;
627  static const unsigned ElemNodeCount = element_data_type::element_node_count ;
628  static const unsigned FunctionCount = element_data_type::function_count ;
629  static const unsigned IntegrationCount = element_data_type::integration_count ;
630 
631  //------------------------------------
632 
633  typedef typename mesh_type::node_coord_type node_coord_type ;
634  typedef typename mesh_type::elem_node_type elem_node_type ;
635  typedef Kokkos::View< scalar_type*[FunctionCount][FunctionCount] , execution_space > elem_matrices_type ;
636  typedef Kokkos::View< scalar_type*[FunctionCount] , execution_space > elem_vectors_type ;
637 
642 
643  typedef typename NodeNodeGraph< elem_node_type , sparse_graph_type , ElemNodeCount >::ElemGraphType elem_graph_type ;
644 
645  //------------------------------------
646 
647 
648  //------------------------------------
649  // Computational data:
650 
651  const element_data_type elem_data ;
652  const elem_node_type elem_node_ids ;
653  const node_coord_type node_coords ;
654  const elem_graph_type elem_graph ;
655  const elem_matrices_type elem_jacobians ;
656  const elem_vectors_type elem_residuals ;
657  const vector_type solution ;
658  const vector_type residual ;
659  const sparse_matrix_type jacobian ;
660  const CoeffFunctionType coeff_function ;
661  const Kokkos::Example::FENL::DeviceConfig dev_config ;
662 
663  ElementComputation( const ElementComputation & rhs )
664  : elem_data()
665  , elem_node_ids( rhs.elem_node_ids )
666  , node_coords( rhs.node_coords )
667  , elem_graph( rhs.elem_graph )
668  , elem_jacobians( rhs.elem_jacobians )
669  , elem_residuals( rhs.elem_residuals )
670  , solution( rhs.solution )
671  , residual( rhs.residual )
672  , jacobian( rhs.jacobian )
673  , coeff_function( rhs.coeff_function )
674  , dev_config( rhs.dev_config )
675  {}
676 
677  // If the element->sparse_matrix graph is provided then perform atomic updates
678 // Otherwise fill per-element contributions for subsequent gather-add into a residual and jacobian.
679  ElementComputation( const mesh_type & arg_mesh ,
680  const CoeffFunctionType & arg_coeff_function ,
681  const vector_type & arg_solution ,
682  const elem_graph_type & arg_elem_graph ,
683  const sparse_matrix_type & arg_jacobian ,
684  const vector_type & arg_residual ,
685  const Kokkos::Example::FENL::DeviceConfig arg_dev_config )
686  : elem_data()
687  , elem_node_ids( arg_mesh.elem_node() )
688  , node_coords( arg_mesh.node_coord() )
689  , elem_graph( arg_elem_graph )
690  , elem_jacobians()
691  , elem_residuals()
692  , solution( arg_solution )
693  , residual( arg_residual )
694  , jacobian( arg_jacobian )
695  , coeff_function( arg_coeff_function )
696  , dev_config( arg_dev_config )
697  {}
698 
699  //------------------------------------
700 
701  void apply() const
702  {
703  const size_t nelem = elem_node_ids.dimension_0();
704  if ( use_team ) {
705  const size_t team_size = dev_config.block_dim.x * dev_config.block_dim.y;
706  const size_t league_size =
707  (nelem + dev_config.block_dim.y-1) / dev_config.block_dim.y;
708  Kokkos::TeamPolicy< execution_space > config( league_size, team_size );
709  parallel_for( config , *this );
710  }
711  else {
712  parallel_for( nelem , *this );
713  }
714  }
715 
716  //------------------------------------
717 
718  static const unsigned FLOPS_transform_gradients =
719  /* Jacobian */ FunctionCount * TensorDim * 2 +
720  /* Inverse jacobian */ TensorDim * 6 + 6 +
721  /* Gradient transform */ FunctionCount * 15 ;
722 
723  KOKKOS_INLINE_FUNCTION
724  double transform_gradients(
725  const double grad[][ FunctionCount ] , // Gradient of bases master element
726  const double x[] ,
727  const double y[] ,
728  const double z[] ,
729  double dpsidx[] ,
730  double dpsidy[] ,
731  double dpsidz[] ) const
732  {
733  enum { j11 = 0 , j12 = 1 , j13 = 2 ,
734  j21 = 3 , j22 = 4 , j23 = 5 ,
735  j31 = 6 , j32 = 7 , j33 = 8 };
736 
737  // Jacobian accumulation:
738 
739  double J[ TensorDim ] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
740 
741  for( unsigned i = 0; i < FunctionCount ; ++i ) {
742  const double x1 = x[i] ;
743  const double x2 = y[i] ;
744  const double x3 = z[i] ;
745 
746  const double g1 = grad[0][i] ;
747  const double g2 = grad[1][i] ;
748  const double g3 = grad[2][i] ;
749 
750  J[j11] += g1 * x1 ;
751  J[j12] += g1 * x2 ;
752  J[j13] += g1 * x3 ;
753 
754  J[j21] += g2 * x1 ;
755  J[j22] += g2 * x2 ;
756  J[j23] += g2 * x3 ;
757 
758  J[j31] += g3 * x1 ;
759  J[j32] += g3 * x2 ;
760  J[j33] += g3 * x3 ;
761  }
762 
763  // Inverse jacobian:
764 
765  double invJ[ TensorDim ] = {
766  static_cast<double>( J[j22] * J[j33] - J[j23] * J[j32] ) ,
767  static_cast<double>( J[j13] * J[j32] - J[j12] * J[j33] ) ,
768  static_cast<double>( J[j12] * J[j23] - J[j13] * J[j22] ) ,
769 
770  static_cast<double>( J[j23] * J[j31] - J[j21] * J[j33] ) ,
771  static_cast<double>( J[j11] * J[j33] - J[j13] * J[j31] ) ,
772  static_cast<double>( J[j13] * J[j21] - J[j11] * J[j23] ) ,
773 
774  static_cast<double>( J[j21] * J[j32] - J[j22] * J[j31] ) ,
775  static_cast<double>( J[j12] * J[j31] - J[j11] * J[j32] ) ,
776  static_cast<double>( J[j11] * J[j22] - J[j12] * J[j21] ) };
777 
778  const double detJ = J[j11] * invJ[j11] +
779  J[j21] * invJ[j12] +
780  J[j31] * invJ[j13] ;
781 
782  const double detJinv = 1.0 / detJ ;
783 
784  for ( unsigned i = 0 ; i < TensorDim ; ++i ) { invJ[i] *= detJinv ; }
785 
786  // Transform gradients:
787 
788  for( unsigned i = 0; i < FunctionCount ; ++i ) {
789  const double g0 = grad[0][i];
790  const double g1 = grad[1][i];
791  const double g2 = grad[2][i];
792 
793  dpsidx[i] = g0 * invJ[j11] + g1 * invJ[j12] + g2 * invJ[j13];
794  dpsidy[i] = g0 * invJ[j21] + g1 * invJ[j22] + g2 * invJ[j23];
795  dpsidz[i] = g0 * invJ[j31] + g1 * invJ[j32] + g2 * invJ[j33];
796  }
797 
798  return detJ ;
799  }
800 
801  KOKKOS_INLINE_FUNCTION
802  void contributeResidualJacobian(
803  const local_scalar_type dof_values[] ,
804  const double dpsidx[] ,
805  const double dpsidy[] ,
806  const double dpsidz[] ,
807  const double detJ ,
808  const local_scalar_type coeff_k ,
809  const double integ_weight ,
810  const double bases_vals[] ,
811  local_scalar_type elem_res[] ,
812  local_scalar_type elem_mat[][ FunctionCount ] ) const
813  {
814  local_scalar_type value_at_pt = 0 ;
815  local_scalar_type gradx_at_pt = 0 ;
816  local_scalar_type grady_at_pt = 0 ;
817  local_scalar_type gradz_at_pt = 0 ;
818 
819  for ( unsigned m = 0 ; m < FunctionCount ; m++ ) {
820  value_at_pt += dof_values[m] * bases_vals[m] ;
821  gradx_at_pt += dof_values[m] * dpsidx[m] ;
822  grady_at_pt += dof_values[m] * dpsidy[m] ;
823  gradz_at_pt += dof_values[m] * dpsidz[m] ;
824  }
825 
826  const local_scalar_type k_detJ_weight = coeff_k * detJ * integ_weight ;
827  const local_scalar_type res_val = value_at_pt * value_at_pt * detJ * integ_weight ;
828  const local_scalar_type mat_val = 2.0 * value_at_pt * detJ * integ_weight ;
829 
830  // $$ R_i = \int_{\Omega} \nabla \phi_i \cdot (k \nabla T) + \phi_i T^2 d \Omega $$
831  // $$ J_{i,j} = \frac{\partial R_i}{\partial T_j} = \int_{\Omega} k \nabla \phi_i \cdot \nabla \phi_j + 2 \phi_i \phi_j T d \Omega $$
832 
833  for ( unsigned m = 0; m < FunctionCount; ++m) {
834  local_scalar_type * const mat = elem_mat[m] ;
835  const double bases_val_m = bases_vals[m];
836  const double dpsidx_m = dpsidx[m] ;
837  const double dpsidy_m = dpsidy[m] ;
838  const double dpsidz_m = dpsidz[m] ;
839 
840  elem_res[m] += k_detJ_weight * ( dpsidx_m * gradx_at_pt +
841  dpsidy_m * grady_at_pt +
842  dpsidz_m * gradz_at_pt ) +
843  res_val * bases_val_m ;
844 
845  for( unsigned n = 0; n < FunctionCount; n++) {
846 
847  mat[n] += k_detJ_weight * ( dpsidx_m * dpsidx[n] +
848  dpsidy_m * dpsidy[n] +
849  dpsidz_m * dpsidz[n] ) +
850  mat_val * bases_val_m * bases_vals[n];
851  }
852  }
853  }
854 
855  KOKKOS_INLINE_FUNCTION
856  void operator()( const typename TeamPolicy< execution_space >::member_type & dev ) const
857  {
858 
859  const unsigned num_ensemble_threads = dev_config.block_dim.x ;
860  const unsigned num_element_threads = dev_config.block_dim.y ;
861  const unsigned element_rank = dev.team_rank() / num_ensemble_threads ;
862  const unsigned ensemble_rank = dev.team_rank() % num_ensemble_threads ;
863 
864  const unsigned ielem =
865  dev.league_rank() * num_element_threads + element_rank;
866 
867  if (ielem >= elem_node_ids.dimension_0())
868  return;
869 
870  (*this)( ielem, ensemble_rank );
871 
872  }
873 
874  KOKKOS_INLINE_FUNCTION
875  void operator()( const unsigned ielem ,
876  const unsigned ensemble_rank = 0 ) const
877  {
878  local_vector_type local_solution =
879  local_vector_view_traits::create_local_view(solution,
880  ensemble_rank);
881  local_vector_type local_residual =
882  local_vector_view_traits::create_local_view(residual,
883  ensemble_rank);
884  local_matrix_type local_jacobian_values =
885  local_matrix_view_traits::create_local_view(jacobian.values,
886  ensemble_rank);
887 
888  // Gather nodal coordinates and solution vector:
889 
890  double x[ FunctionCount ] ;
891  double y[ FunctionCount ] ;
892  double z[ FunctionCount ] ;
893  local_scalar_type val[ FunctionCount ] ;
894  unsigned node_index[ ElemNodeCount ];
895 
896  for ( unsigned i = 0 ; i < ElemNodeCount ; ++i ) {
897  const unsigned ni = elem_node_ids( ielem , i );
898 
899  node_index[i] = ni ;
900 
901  x[i] = node_coords( ni , 0 );
902  y[i] = node_coords( ni , 1 );
903  z[i] = node_coords( ni , 2 );
904 
905  val[i] = local_solution( ni );
906  }
907 
908 
909  local_scalar_type elem_vec[ FunctionCount ] ;
910  local_scalar_type elem_mat[ FunctionCount ][ FunctionCount ] ;
911 
912  for( unsigned i = 0; i < FunctionCount ; i++ ) {
913  elem_vec[i] = 0 ;
914  for( unsigned j = 0; j < FunctionCount ; j++){
915  elem_mat[i][j] = 0 ;
916  }
917  }
918 
919 
920  for ( unsigned i = 0 ; i < IntegrationCount ; ++i ) {
921  double dpsidx[ FunctionCount ] ;
922  double dpsidy[ FunctionCount ] ;
923  double dpsidz[ FunctionCount ] ;
924 
925  local_scalar_type coeff_k = 0 ;
926 
927  {
928  double pt[] = {0.0, 0.0, 0.0};
929 
930  // If function is not constant
931  // then compute physical coordinates of integration point
932  if ( ! coeff_function.is_constant ) {
933  for ( unsigned j = 0 ; j < FunctionCount ; ++j ) {
934  pt[0] += x[j] * elem_data.values[i][j] ;
935  pt[1] += y[j] * elem_data.values[i][j] ;
936  pt[2] += z[j] * elem_data.values[i][j] ;
937  }
938  }
939 
940  // Need to fix this for local_scalar_type!!!!!!
941  coeff_k = coeff_function(pt, ensemble_rank);
942  }
943 
944  const double detJ =
945  transform_gradients( elem_data.gradients[i] , x , y , z ,
946  dpsidx , dpsidy , dpsidz );
947 
948  contributeResidualJacobian( val , dpsidx , dpsidy , dpsidz ,
949  detJ , coeff_k ,
950  elem_data.weights[i] ,
951  elem_data.values[i] ,
952  elem_vec , elem_mat );
953  }
954 
955  for( unsigned i = 0 ; i < FunctionCount ; i++ ) {
956  const unsigned row = node_index[i] ;
957  if ( row < residual.dimension_0() ) {
958  atomic_add( & local_residual( row ) , elem_vec[i] );
959 
960  for( unsigned j = 0 ; j < FunctionCount ; j++ ) {
961  const unsigned entry = elem_graph( ielem , i , j );
962  if ( entry != ~0u ) {
963  atomic_add( & local_jacobian_values( entry ) , elem_mat[i][j] );
964  }
965  }
966  }
967  }
968  }
969 }; /* ElementComputation */
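
// Assembly sketch (illustrative; 'fixture', 'diffusion', 'mesh_to_graph',
// 'jacobian', 'residual' and 'dev_config_elem' are hypothetical objects built
// as in the surrounding FENL example driver):
//
//   ElementComputation< mesh_type , matrix_type , coeff_type >
//     assemble( fixture , diffusion , solution ,
//               mesh_to_graph.elem_graph , jacobian , residual ,
//               dev_config_elem );
//   assemble.apply();   // atomic add into jacobian.values and residual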
970 
971 //----------------------------------------------------------------------------
972 
973 template< class FixtureType , class SparseMatrixType >
974 class DirichletComputation ;
975 
976 template< class ExecutionSpace , BoxElemPart::ElemOrder Order , class CoordinateMap ,
977  typename ScalarType >
978 class DirichletComputation<
979  Kokkos::Example::BoxElemFixture< ExecutionSpace , Order , CoordinateMap > ,
980  Kokkos::Example::FENL::CrsMatrix< ScalarType , ExecutionSpace > >
981 {
982 public:
983 
984  typedef Kokkos::Example::BoxElemFixture< ExecutionSpace , Order , CoordinateMap > mesh_type ;
985  typedef typename mesh_type::node_coord_type node_coord_type ;
986  typedef typename node_coord_type::value_type scalar_coord_type ;
987 
988  typedef ExecutionSpace execution_space ;
989  typedef ScalarType scalar_type ;
990 
991  typedef Kokkos::Example::FENL::CrsMatrix< ScalarType , ExecutionSpace > sparse_matrix_type ;
992  typedef typename sparse_matrix_type::StaticCrsGraphType sparse_graph_type ;
993  typedef typename sparse_matrix_type::values_type matrix_values_type ;
994  typedef Kokkos::View< scalar_type* , execution_space > vector_type ;
995 
996  //------------------------------------
997 
998  typedef LocalViewTraits< vector_type > local_vector_view_traits;
999  typedef LocalViewTraits< matrix_values_type > local_matrix_view_traits;
1000  typedef typename local_vector_view_traits::local_view_type local_vector_type;
1001  typedef typename local_matrix_view_traits::local_view_type local_matrix_type;
1002  static const bool use_team = local_vector_view_traits::use_team;
1003 
1004  typedef double bc_scalar_type ;
1005 
1006  //------------------------------------
1007  // Computational data:
1008 
1009  const node_coord_type node_coords ;
1010  const vector_type solution ;
1011  const sparse_matrix_type jacobian ;
1012  const vector_type residual ;
1013  const bc_scalar_type bc_lower_value ;
1014  const bc_scalar_type bc_upper_value ;
1015  const scalar_coord_type bc_lower_limit ;
1016  const scalar_coord_type bc_upper_limit ;
1017  const unsigned bc_plane ;
1018  const unsigned node_count ;
1019  bool init ;
1020  const Kokkos::Example::FENL::DeviceConfig dev_config ;
1021 
1022 
1023  DirichletComputation( const mesh_type & arg_mesh ,
1024  const vector_type & arg_solution ,
1025  const sparse_matrix_type & arg_jacobian ,
1026  const vector_type & arg_residual ,
1027  const unsigned arg_bc_plane ,
1028  const bc_scalar_type arg_bc_lower_value ,
1029  const bc_scalar_type arg_bc_upper_value ,
1030  const Kokkos::Example::FENL::DeviceConfig arg_dev_config )
1031  : node_coords( arg_mesh.node_coord() )
1032  , solution( arg_solution )
1033  , jacobian( arg_jacobian )
1034  , residual( arg_residual )
1035  , bc_lower_value( arg_bc_lower_value )
1036  , bc_upper_value( arg_bc_upper_value )
1037  , bc_lower_limit( std::numeric_limits<scalar_coord_type>::epsilon() )
1038  , bc_upper_limit( scalar_coord_type(1) - std::numeric_limits<scalar_coord_type>::epsilon() )
1039  , bc_plane( arg_bc_plane )
1040  , node_count( arg_mesh.node_count_owned() )
1041  , init( false )
1042  , dev_config( arg_dev_config )
1043  {
1044  parallel_for( node_count , *this );
1045  init = true ;
1046  }
1047 
1048  void apply() const
1049  {
1050  if ( use_team ) {
1051  const size_t team_size = dev_config.block_dim.x * dev_config.block_dim.y;
1052  const size_t league_size =
1053  (node_count + dev_config.block_dim.y-1) / dev_config.block_dim.y;
1054  Kokkos::TeamPolicy< execution_space > config( league_size, team_size );
1055  parallel_for( config , *this );
1056  }
1057  else
1058  parallel_for( node_count , *this );
1059  }
1060 
1061  //------------------------------------
1062 
1063  KOKKOS_INLINE_FUNCTION
1064  void operator()( const typename TeamPolicy< execution_space >::member_type & dev ) const
1065  {
1066 
1067  const unsigned num_ensemble_threads = dev_config.block_dim.x ;
1068  const unsigned num_node_threads = dev_config.block_dim.y ;
1069  const unsigned node_rank = dev.team_rank() / num_ensemble_threads ;
1070  const unsigned ensemble_rank = dev.team_rank() % num_ensemble_threads ;
1071 
1072  const unsigned inode = dev.league_rank() * num_node_threads + node_rank;
1073 
1074  if (inode >= node_count)
1075  return;
1076 
1077  (*this)( inode, ensemble_rank );
1078 
1079  }
1080 
1081  KOKKOS_INLINE_FUNCTION
1082  void operator()( const unsigned inode ,
1083  const unsigned ensemble_rank = 0) const
1084  {
1085  local_vector_type local_residual =
1086  local_vector_view_traits::create_local_view(residual,
1087  ensemble_rank);
1088  local_matrix_type local_jacobian_values =
1089  local_matrix_view_traits::create_local_view(jacobian.values,
1090  ensemble_rank);
1091 
1092  // Apply the Dirichlet boundary condition on the solution and residual vectors.
1093  // To maintain the symmetry of the original global stiffness matrix,
1094  // zero out the columns that correspond to boundary conditions, and
1095  // update the residual vector accordingly
1096 
1097  const unsigned iBeg = jacobian.graph.row_map[inode];
1098  const unsigned iEnd = jacobian.graph.row_map[inode+1];
1099 
1100  const scalar_coord_type c = node_coords(inode,bc_plane);
1101  const bool bc_lower = c <= bc_lower_limit ;
1102  const bool bc_upper = bc_upper_limit <= c ;
1103 
1104  if ( ! init ) {
1105  solution(inode) = bc_lower ? bc_lower_value : (
1106  bc_upper ? bc_upper_value : 0 );
1107  }
1108  else {
1109  if ( bc_lower || bc_upper ) {
1110 
1111  local_residual(inode) = 0 ;
1112 
1113  // zero each value on the row, and leave a one
1114  // on the diagonal
1115 
1116  for( unsigned i = iBeg ; i < iEnd ; ++i ) {
1117  local_jacobian_values(i) = int(inode) == int(jacobian.graph.entries(i)) ? 1 : 0 ;
1118  }
1119  }
1120  else {
1121 
1122  // Find any columns that are boundary conditions.
1123  // Clear them and adjust the residual vector
1124 
1125  for( unsigned i = iBeg ; i < iEnd ; ++i ) {
1126  const unsigned cnode = jacobian.graph.entries(i) ;
1127  const scalar_coord_type cc = node_coords(cnode,bc_plane);
1128 
1129  if ( ( cc <= bc_lower_limit ) || ( bc_upper_limit <= cc ) ) {
1130  local_jacobian_values(i) = 0 ;
1131  }
1132  }
1133  }
1134  }
1135  }
1136 };
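
// Boundary-condition sketch (illustrative): impose u = 0 on the lower x face
// and u = 1 on the upper x face of the unit-box fixture:
//
//   DirichletComputation< mesh_type , matrix_type >
//     bc( fixture , solution , jacobian , residual ,
//         0 /* bc_plane = x coordinate */ , 0.0 , 1.0 , dev_config_bc );
//   // The constructor seeds the boundary values of 'solution'; after each
//   // assembly, bc.apply() zeroes boundary rows/columns and sets unit diagonals.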
1137 
1138 template< typename FixtureType , typename VectorType >
1139 class ResponseComputation
1140 {
1141 public:
1142 
1143  typedef FixtureType fixture_type ;
1144  typedef VectorType vector_type ;
1145  typedef typename vector_type::execution_space execution_space ;
1146  typedef typename vector_type::value_type value_type ;
1147 
1148  typedef Kokkos::Example::HexElement_Data< fixture_type::ElemNode > element_data_type ;
1149  static const unsigned SpatialDim = element_data_type::spatial_dimension ;
1150  static const unsigned TensorDim = SpatialDim * SpatialDim ;
1151  static const unsigned ElemNodeCount = element_data_type::element_node_count ;
1152  static const unsigned IntegrationCount = element_data_type::integration_count ;
1153 
1154  //------------------------------------
1155  // Computational data:
1156 
1157  const element_data_type elem_data ;
1158  const fixture_type fixture ;
1159  const vector_type solution ;
1160 
1161  ResponseComputation( const ResponseComputation & rhs )
1162  : elem_data()
1163  , fixture( rhs.fixture )
1164  , solution( rhs.solution )
1165  {}
1166 
1167  ResponseComputation( const fixture_type& arg_fixture ,
1168  const vector_type & arg_solution )
1169  : elem_data()
1170  , fixture( arg_fixture )
1171  , solution( arg_solution )
1172  {}
1173 
1174  //------------------------------------
1175 
1176  value_type apply() const
1177  {
1178  value_type response = 0;
1179  //Kokkos::parallel_reduce( fixture.elem_count() , *this , response );
1180  Kokkos::parallel_reduce( solution.dimension_0() , *this , response );
1181  return response;
1182  }
1183 
1184  //------------------------------------
1185 
1186  KOKKOS_INLINE_FUNCTION
1187  double compute_detJ(
1188  const double grad[][ ElemNodeCount ] , // Gradient of bases master element
1189  const double x[] ,
1190  const double y[] ,
1191  const double z[] ) const
1192  {
1193  enum { j11 = 0 , j12 = 1 , j13 = 2 ,
1194  j21 = 3 , j22 = 4 , j23 = 5 ,
1195  j31 = 6 , j32 = 7 , j33 = 8 };
1196 
1197  // Jacobian accumulation:
1198 
1199  double J[ TensorDim ] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1200 
1201  for( unsigned i = 0; i < ElemNodeCount ; ++i ) {
1202  const double x1 = x[i] ;
1203  const double x2 = y[i] ;
1204  const double x3 = z[i] ;
1205 
1206  const double g1 = grad[0][i] ;
1207  const double g2 = grad[1][i] ;
1208  const double g3 = grad[2][i] ;
1209 
1210  J[j11] += g1 * x1 ;
1211  J[j12] += g1 * x2 ;
1212  J[j13] += g1 * x3 ;
1213 
1214  J[j21] += g2 * x1 ;
1215  J[j22] += g2 * x2 ;
1216  J[j23] += g2 * x3 ;
1217 
1218  J[j31] += g3 * x1 ;
1219  J[j32] += g3 * x2 ;
1220  J[j33] += g3 * x3 ;
1221  }
1222 
1223  // Inverse jacobian:
1224 
1225  double invJ[ TensorDim ] = {
1226  static_cast<double>( J[j22] * J[j33] - J[j23] * J[j32] ) ,
1227  static_cast<double>( J[j13] * J[j32] - J[j12] * J[j33] ) ,
1228  static_cast<double>( J[j12] * J[j23] - J[j13] * J[j22] ) ,
1229 
1230  static_cast<double>( J[j23] * J[j31] - J[j21] * J[j33] ) ,
1231  static_cast<double>( J[j11] * J[j33] - J[j13] * J[j31] ) ,
1232  static_cast<double>( J[j13] * J[j21] - J[j11] * J[j23] ) ,
1233 
1234  static_cast<double>( J[j21] * J[j32] - J[j22] * J[j31] ) ,
1235  static_cast<double>( J[j12] * J[j31] - J[j11] * J[j32] ) ,
1236  static_cast<double>( J[j11] * J[j22] - J[j12] * J[j21] ) };
1237 
1238  const double detJ = J[j11] * invJ[j11] +
1239  J[j21] * invJ[j12] +
1240  J[j31] * invJ[j13] ;
1241 
1242  return detJ ;
1243  }
1244 
1245  KOKKOS_INLINE_FUNCTION
1246  value_type contributeResponse(
1247  const value_type dof_values[] ,
1248  const double detJ ,
1249  const double integ_weight ,
1250  const double bases_vals[] ) const
1251  {
1252  // $$ g_i = \int_{\Omega} T^2 d \Omega $$
1253 
1254  value_type value_at_pt = 0 ;
1255  for ( unsigned m = 0 ; m < ElemNodeCount ; m++ ) {
1256  value_at_pt += dof_values[m] * bases_vals[m] ;
1257  }
1258 
1259  value_type elem_response =
1260  value_at_pt * value_at_pt * detJ * integ_weight ;
1261 
1262  return elem_response;
1263  }
1264 
1265  /*
1266  KOKKOS_INLINE_FUNCTION
1267  void operator()( const unsigned ielem , value_type& response ) const
1268  {
1269  // Gather nodal coordinates and solution vector:
1270 
1271  double x[ ElemNodeCount ] ;
1272  double y[ ElemNodeCount ] ;
1273  double z[ ElemNodeCount ] ;
1274  value_type val[ ElemNodeCount ] ;
1275 
1276  for ( unsigned i = 0 ; i < ElemNodeCount ; ++i ) {
1277  const unsigned ni = fixture.elem_node( ielem , i );
1278 
1279  x[i] = fixture.node_coord( ni , 0 );
1280  y[i] = fixture.node_coord( ni , 1 );
1281  z[i] = fixture.node_coord( ni , 2 );
1282 
1283  val[i] = solution( ni );
1284  }
1285 
1286  for ( unsigned i = 0 ; i < IntegrationCount ; ++i ) {
1287 
1288  const double detJ = compute_detJ( elem_data.gradients[i] , x , y , z );
1289 
1290  response += contributeResponse( val , detJ , elem_data.weights[i] ,
1291  elem_data.values[i] );
1292  }
1293  }
1294  */
1295 
1296  KOKKOS_INLINE_FUNCTION
1297  void operator()( const unsigned i , value_type& response ) const
1298  {
1299  value_type u = solution(i);
1300  response += (u * u) / fixture.node_count_global();
1301  }
1302 
1303  KOKKOS_INLINE_FUNCTION
1304  void init( value_type & response ) const
1305  { response = 0 ; }
1306 
1307  KOKKOS_INLINE_FUNCTION
1308  void join( volatile value_type & response ,
1309  volatile const value_type & input ) const
1310  { response += input ; }
1311 
1312 }; /* ResponseComputation */
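
// Response sketch (illustrative): the reduction above computes
// sum_i u_i^2 / node_count_global over the solution vector:
//
//   ResponseComputation< fixture_type , vector_type > response( fixture , solution );
//   typename vector_type::value_type r = response.apply();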
1313 
1314 } /* namespace FENL */
1315 } /* namespace Example */
1316 } /* namespace Kokkos */
1317 
1318 //----------------------------------------------------------------------------
1319 
1320 /* A Cuda-specific specialization for the element computation functor. */
1321 #if defined( __CUDACC__ )
1322 // #include <NonlinearElement_Cuda.hpp>
1323 #endif
1324 
1325 //----------------------------------------------------------------------------
1326 
1327 #endif /* #ifndef KOKKOS_EXAMPLE_FENLFUNCTORS_HPP */