#include <stk_util/parallel/ParallelComm.hpp>
#include <stk_util/parallel/DistributedIndex.hpp>
#include <stk_util/util/RadixSort.hpp>

// Comparator for std::lower_bound: orders (key,proc) entries by key.
struct KeyProcLess {
  bool operator()( const DistributedIndex::KeyProc & lhs ,
                   const DistributedIndex::KeyType & rhs ) const
  { return lhs.first < rhs ; }
};

// Sort the (key,proc) usage vector and discard duplicate entries.
void sort_unique( std::vector<DistributedIndex::KeyProc> & key_usage )
{
  std::vector<DistributedIndex::KeyProc>::iterator
    i = key_usage.begin() , j = key_usage.end() ;
  std::sort( i , j );
  i = std::unique( i , j );
  key_usage.erase( i , j );
}

// Radix-sort the keys and discard duplicates.
void sort_unique( std::vector<DistributedIndex::KeyType> & keys )
{
  stk_classic::util::radix_sort_unsigned((keys.empty() ? NULL : &keys[0]), keys.size());
  std::vector<DistributedIndex::KeyType>::iterator
    i = keys.begin() , j = keys.end() ;
  i = std::unique( i , j );
  keys.erase( i , j );
}
// Reserve space in v for every entry remaining in the receive buffers.
template <typename T>
inline void reserve_for_recv_buffer( const CommAll& all,
                                     const DistributedIndex::ProcType& comm_size, std::vector<T>& v)
{
  unsigned num_remote = 0;
  for (DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    num_remote += buf.remaining() / sizeof(T);
  }
  v.reserve(v.size() + num_remote);
}

// Unpack every entry of type T from the receive buffers into v.
template <typename T>
inline void unpack_recv_buffer( const CommAll& all,
                                const DistributedIndex::ProcType& comm_size, std::vector<T>& v)
{
  reserve_for_recv_buffer(all, comm_size, v);
  for (DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    while ( buf.remaining() ) {
      T value ;
      buf.unpack( value );
      v.push_back( value );
    }
  }
}

// Unpack entries of type T, pairing each with the rank it was received from.
template <typename T>
inline void unpack_with_proc_recv_buffer( const CommAll& all,
                                          const DistributedIndex::ProcType& comm_size,
                                          std::vector<std::pair<T,DistributedIndex::ProcType> >& v)
{
  reserve_for_recv_buffer(all, comm_size, v);
  for ( DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    std::pair<T,DistributedIndex::ProcType> kp;
    kp.second = p;
    while ( buf.remaining() ) {
      buf.unpack( kp.first );
      v.push_back( kp );
    }
  }
}
// Keys within a span are dealt out to processes in chunks of
// 2^DISTRIBUTED_INDEX_CHUNK_BITS consecutive keys.
enum { DISTRIBUTED_INDEX_CHUNK_BITS = 12 };

enum { DISTRIBUTED_INDEX_CHUNK_SIZE =
       size_t(1) << DISTRIBUTED_INDEX_CHUNK_BITS };

// Map a key to the process that owns its chunk; an out-of-span key maps
// to m_comm_size, which callers treat as an error.
DistributedIndex::ProcType
DistributedIndex::to_which_proc( const DistributedIndex::KeyType & key ) const
{
  for ( size_t i = 0 ; i < m_span_count ; ++i ) {
    if ( m_key_span[i].first <= key && key <= m_key_span[i].second ) {
      const KeyType offset = key - m_key_span[i].first ;
      return ( offset >> DISTRIBUTED_INDEX_CHUNK_BITS ) % m_comm_size ;
    }
  }
  return m_comm_size ;
}
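// Illustrative sketch (not part of the original source): with
// DISTRIBUTED_INDEX_CHUNK_BITS = 12 the mapping deals a span out in chunks
// of 4096 keys, round-robin over the ranks.  For a span starting at key 1
// on 4 processes, keys [1..4096] map to rank 0, [4097..8192] to rank 1,
// [8193..12288] to rank 2, [12289..16384] to rank 3, and [16385..20480]
// wraps back to rank 0.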
DistributedIndex::~DistributedIndex() {}

DistributedIndex::DistributedIndex (
  ParallelMachine comm ,
  const std::vector<KeySpan> & partition_bounds )
  : m_comm( comm ),
    m_comm_rank( parallel_machine_rank( comm ) ),
    m_comm_size( parallel_machine_size( comm ) ),
    m_span_count(0),
    m_key_span(),
    m_key_usage()
{
  unsigned info[2] ;
  info[0] = partition_bounds.size();
  info[1] = 0 ;

  // Check that each span is valid and does not overlap the previous span:
  for ( std::vector<KeySpan>::const_iterator
        i = partition_bounds.begin() ; i != partition_bounds.end() ; ++i ) {
    if ( i->second < i->first ||
         ( i != partition_bounds.begin() && i->first <= (i-1)->second ) ) {
      info[1] = 1 ;
    }
  }

#if defined( STK_HAS_MPI )
  if (m_comm_size > 1) {
    MPI_Bcast( info , 2 , MPI_UNSIGNED , 0 , comm );
  }
  if ( 0 != m_comm_rank ) {
    m_key_span.resize( info[0] );
  }
  else {
    m_key_span = partition_bounds ;
  }
  if (m_comm_size > 1) {
    MPI_Bcast( (m_key_span.empty() ? NULL : & m_key_span[0]), info[0] * sizeof(KeySpan), MPI_BYTE, 0, comm );
  }
#else
  m_key_span = partition_bounds ;
#endif

  if ( info[1] ) {
    std::ostringstream msg ;
    msg << "sierra::parallel::DistributedIndex ctor( comm , " ;
    for ( std::vector<KeySpan>::const_iterator
          i = partition_bounds.begin() ; i != partition_bounds.end() ; ++i ) {
      msg << " ( min = " << i->first
          << " , max = " << i->second << " )" ;
    }
    msg << " ) contains invalid span of keys" ;
    throw std::runtime_error( msg.str() );
  }

  m_span_count = info[0] ;

  if ( 0 == m_span_count ) {
    m_key_span.push_back(
      KeySpan( std::numeric_limits<KeyType>::min(),
               std::numeric_limits<KeyType>::max() ) );
    m_span_count = 1 ;
  }
}
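// Illustrative usage sketch (not part of the original source); assumes an
// MPI build where ParallelMachine is the MPI communicator type, and a
// made-up span of one million keys:
//
//   std::vector<DistributedIndex::KeySpan> spans ;
//   spans.push_back( DistributedIndex::KeySpan( 1 , 1000000 ) );
//   DistributedIndex index( MPI_COMM_WORLD , spans );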
bool is_sorted_and_unique( const std::vector<DistributedIndex::KeyProc> & key_usage )
{
  std::vector<DistributedIndex::KeyProc>::const_iterator itr = key_usage.begin();
  std::vector<DistributedIndex::KeyProc>::const_iterator end = key_usage.end();
  for ( ; itr != end; ++itr ) {
    if ( itr + 1 != end && *itr >= *(itr + 1) ) {
      return false ;
    }
  }
  return true ;
}

// For each requested key, send every (key,proc) usage entry for that key
// to every process that uses the key.
void query_pack_to_usage(
  const std::vector<DistributedIndex::KeyProc> & key_usage ,
  const std::vector<DistributedIndex::KeyType> & request ,
  CommAll & all )
{
  std::vector<DistributedIndex::KeyProc>::const_iterator i = key_usage.begin();
  std::vector<DistributedIndex::KeyType>::const_iterator k = request.begin();

  for ( ; k != request.end() && i != key_usage.end() ; ++k ) {

    // Advance to the usage entries for this key:
    for ( ; i != key_usage.end() && i->first < *k ; ++i );

    std::vector<DistributedIndex::KeyProc>::const_iterator j = i ;
    for ( ; j != key_usage.end() && j->first == *k ; ++j );

    // Send the entries [i,j) to every process named in [i,j):
    for ( std::vector<DistributedIndex::KeyProc>::const_iterator
          jsend = i ; jsend != j ; ++jsend ) {
      for ( std::vector<DistributedIndex::KeyProc>::const_iterator
            jinfo = i ; jinfo != j ; ++jinfo ) {
        all.send_buffer( jsend->second )
           .pack<DistributedIndex::KeyProc>( *jinfo );
      }
    }
  }
}
// For each requested (key,proc), send every usage entry for that key to
// the requesting process.
void query_pack( const std::vector<DistributedIndex::KeyProc> & key_usage ,
                 const std::vector<DistributedIndex::KeyProc> & request ,
                 CommAll & all )
{
  std::vector<DistributedIndex::KeyProc>::const_iterator i = key_usage.begin();

  for ( std::vector<DistributedIndex::KeyProc>::const_iterator
        k = request.begin() ;
        k != request.end() && i != key_usage.end() ; ++k ) {

    for ( ; i != key_usage.end() && i->first < k->first ; ++i );

    for ( std::vector<DistributedIndex::KeyProc>::const_iterator j = i ;
          j != key_usage.end() && j->first == k->first ; ++j ) {
      all.send_buffer( k->second ).pack<DistributedIndex::KeyProc>( *j );
    }
  }
}
void DistributedIndex::query(
  const std::vector<DistributedIndex::KeyProc> & request ,
        std::vector<DistributedIndex::KeyProc> & sharing_of_keys ) const
{
  sharing_of_keys.clear();

  CommAll all( m_comm );

  query_pack( m_key_usage , request , all ); // Sizing pass
  all.allocate_buffers( m_comm_size / 4 , false );
  query_pack( m_key_usage , request , all ); // Packing pass
  all.communicate();

  unpack_recv_buffer(all, m_comm_size, sharing_of_keys);

  std::sort( sharing_of_keys.begin() , sharing_of_keys.end() );
}

void DistributedIndex::query(
  std::vector<DistributedIndex::KeyProc> & sharing_of_local_keys ) const
{
  query( m_key_usage , sharing_of_local_keys );
}
void DistributedIndex::query(
  const std::vector<DistributedIndex::KeyType> & keys ,
        std::vector<DistributedIndex::KeyProc> & sharing_keys ) const
{
  std::vector<KeyProc> request ;

  {
    bool bad_key = false ;
    CommAll all( m_comm );

    // Sizing pass; also detect keys outside every span.
    for ( std::vector<KeyType>::const_iterator
          k = keys.begin() ; k != keys.end() ; ++k ) {
      const ProcType p = to_which_proc( *k );
      if ( p < m_comm_size ) {
        all.send_buffer( p ).pack<KeyType>( *k );
      }
      else {
        bad_key = true ;
      }
    }

    // allocate_buffers globally reduces the error flag.
    bad_key = all.allocate_buffers( m_comm_size / 4 , false , bad_key );
    if ( bad_key ) {
      throw std::runtime_error("stk_classic::parallel::DistributedIndex::query given a key which is out of range");
    }

    // Packing pass:
    for ( std::vector<KeyType>::const_iterator
          k = keys.begin() ; k != keys.end() ; ++k ) {
      all.send_buffer( to_which_proc( *k ) ).pack<KeyType>( *k );
    }

    all.communicate();

    unpack_with_proc_recv_buffer(all, m_comm_size, request);
  }

  sort_unique( request );

  query( request , sharing_keys );
}
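// Illustrative usage sketch (not part of the original source); the key
// value is made up and 'index' is a previously constructed DistributedIndex:
//
//   std::vector<DistributedIndex::KeyType> keys( 1 , 42 );
//   std::vector<DistributedIndex::KeyProc> sharing ;
//   index.query( keys , sharing );
//   // 'sharing' now holds sorted (key, rank) pairs for every process that
//   // registered one of the queried keys via update_keys().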
void DistributedIndex::query_to_usage(
  const std::vector<DistributedIndex::KeyType> & keys ,
        std::vector<DistributedIndex::KeyProc> & sharing_keys ) const
{
  std::vector<KeyType> request ;

  {
    bool bad_key = false ;
    CommAll all( m_comm );

    // Sizing pass; also detect keys outside every span.
    for ( std::vector<KeyType>::const_iterator
          k = keys.begin() ; k != keys.end() ; ++k ) {
      const ProcType p = to_which_proc( *k );
      if ( p < m_comm_size ) {
        all.send_buffer( p ).pack<KeyType>( *k );
      }
      else {
        bad_key = true ;
      }
    }

    bad_key = all.allocate_buffers( m_comm_size / 4 , false , bad_key );
    if ( bad_key ) {
      throw std::runtime_error("stk_classic::parallel::DistributedIndex::query given a key which is out of range");
    }

    // Packing pass:
    for ( std::vector<KeyType>::const_iterator
          k = keys.begin() ; k != keys.end() ; ++k ) {
      all.send_buffer( to_which_proc( *k ) ).pack<KeyType>( *k );
    }

    all.communicate();

    unpack_recv_buffer(all, m_comm_size, request);
  }

  sort_unique( request );

  {
    CommAll all( m_comm );

    query_pack_to_usage( m_key_usage , request , all ); // Sizing pass
    all.allocate_buffers( m_comm_size / 4 , false );
    query_pack_to_usage( m_key_usage , request , all ); // Packing pass
    all.communicate();

    unpack_recv_buffer(all, m_comm_size, sharing_keys);

    std::sort( sharing_keys.begin() , sharing_keys.end() );
  }
}
// Marks (key,proc) entries for removal by negating the process rank,
// then erases all marked entries.
struct RemoveKeyProc {

  bool operator()( const DistributedIndex::KeyProc & kp ) const
  { return kp.second < 0 ; }

  static void mark( std::vector<DistributedIndex::KeyProc> & key_usage ,
                    const DistributedIndex::KeyProc & kp )
  {
    std::vector<DistributedIndex::KeyProc>::iterator
      i = std::lower_bound( key_usage.begin(),
                            key_usage.end(), kp.first, KeyProcLess() );

    while ( i != key_usage.end() && kp.first == i->first && kp.second != i->second) { ++i ; }

    if ( i != key_usage.end() && kp == *i ) {
      i->second = -1 ;
    }
  }

  static void clean( std::vector<DistributedIndex::KeyProc> & key_usage )
  {
    std::vector<DistributedIndex::KeyProc>::iterator end =
      std::remove_if( key_usage.begin() , key_usage.end() , RemoveKeyProc() );
    key_usage.erase( end , key_usage.end() );
  }
};
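// Illustrative usage sketch (not part of the original source) for
// DistributedIndex::update_keys, defined below.  The key values are made up
// and every process must make the call collectively:
//
//   std::vector<DistributedIndex::KeyType> add , remove ;
//   add.push_back( 17 );
//   add.push_back( 18 );
//   index.update_keys( add , remove );   // 'remove' may be empty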
void DistributedIndex::update_keys(
  const std::vector<DistributedIndex::KeyType> & add_new_keys ,
  const std::vector<DistributedIndex::KeyType> & remove_existing_keys )
{
  std::vector<unsigned long> count_remove( m_comm_size , (unsigned long) 0 );
  std::vector<unsigned long> count_add(    m_comm_size , (unsigned long) 0 );

  size_t local_bad_input = 0 ;

  // Count keys being removed and added whose chunks are owned by other processes.

  for ( std::vector<KeyType>::const_iterator
        i = remove_existing_keys.begin();
        i != remove_existing_keys.end(); ++i ) {
    const ProcType p = to_which_proc( *i );
    if ( m_comm_size <= p ) {
      ++local_bad_input ;
    }
    else if ( p != m_comm_rank ) {
      ++( count_remove[ p ] );
    }
  }

  for ( std::vector<KeyType>::const_iterator
        i = add_new_keys.begin();
        i != add_new_keys.end(); ++i ) {
    const ProcType p = to_which_proc( *i );
    if ( p == m_comm_size ) {
      ++local_bad_input ;
    }
    else if ( p != m_comm_rank ) {
      ++( count_add[ p ] );
    }
  }

  CommAll all( m_comm );

  // Sizing pass:

  for ( int p = 0 ; p < m_comm_size ; ++p ) {
    if ( count_remove[p] || count_add[p] ) {
      CommBuffer & buf = all.send_buffer( p );
      buf.skip<unsigned long>( 1 );
      buf.skip<KeyType>( count_remove[p] );
      buf.skip<KeyType>( count_add[p] );
    }
  }

  // Allocate buffers and globally reduce the error flag:

  const bool symmetry_flag = false ;
  const bool error_flag = 0 < local_bad_input ;

  bool global_bad_input =
    all.allocate_buffers( m_comm_size / 4, symmetry_flag , error_flag );

  if ( global_bad_input ) {
    std::ostringstream msg ;
    if ( 0 < local_bad_input ) {
      msg << "stk_classic::parallel::DistributedIndex::update_keys ERROR Given "
          << local_bad_input << " of " << add_new_keys.size()
          << " add_new_keys outside of any span" ;
    }
    throw std::runtime_error( msg.str() );
  }
  // Packing pass:

  for ( int p = 0 ; p < m_comm_size ; ++p ) {
    if ( count_remove[p] || count_add[p] ) {
      all.send_buffer( p ).pack<unsigned long>( count_remove[p] );
    }
  }

  for ( std::vector<KeyType>::const_iterator
        i = remove_existing_keys.begin();
        i != remove_existing_keys.end(); ++i ) {
    const ProcType p = to_which_proc( *i );
    if ( p != m_comm_rank ) {
      all.send_buffer( p ).pack<KeyType>( *i );
    }
  }

  for ( std::vector<KeyType>::const_iterator
        i = add_new_keys.begin();
        i != add_new_keys.end(); ++i ) {
    const ProcType p = to_which_proc( *i );
    if ( p != m_comm_rank ) {
      all.send_buffer( p ).pack<KeyType>( *i );
    }
  }

  // Communicate the keys:

  all.communicate();

  // Mark local keys to be removed:

  for ( std::vector<KeyType>::const_iterator
        i = remove_existing_keys.begin();
        i != remove_existing_keys.end(); ++i ) {
    const ProcType p = to_which_proc( *i );
    if ( p == m_comm_rank ) {
      RemoveKeyProc::mark( m_key_usage , KeyProc( *i , p ) );
    }
  }

  // Unpack and mark the keys removed by other processes:

  for ( int p = 0 ; p < m_comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    if ( buf.remaining() ) {
      unsigned long remove_count = 0 ;

      KeyProc kp ;
      kp.second = p ;

      buf.unpack<unsigned long>( remove_count );

      for ( ; 0 < remove_count ; --remove_count ) {
        buf.unpack<KeyType>( kp.first );
        RemoveKeyProc::mark( m_key_usage , kp );
      }
    }
  }

  RemoveKeyProc::clean( m_key_usage );
  // Gather the local keys being added:

  std::vector<KeyProc> local_key_usage ;
  local_key_usage.reserve(add_new_keys.size());
  for ( std::vector<KeyType>::const_iterator
        i = add_new_keys.begin();
        i != add_new_keys.end(); ++i ) {
    const ProcType p = to_which_proc( *i );
    if ( p == m_comm_rank ) {
      local_key_usage.push_back( KeyProc( *i , p ) );
    }
  }

  // Merge the local additions with the existing usage:

  std::vector<KeyProc> temp_key ;
  temp_key.reserve(local_key_usage.size() + m_key_usage.size());
  std::sort( local_key_usage.begin(), local_key_usage.end() );
  std::merge( m_key_usage.begin(),
              m_key_usage.end(),
              local_key_usage.begin(),
              local_key_usage.end(),
              std::back_inserter(temp_key) );

  // Unpack and merge the keys added by other processes:

  std::vector<KeyProc> remote_key_usage ;

  unpack_with_proc_recv_buffer(all, m_comm_size, remote_key_usage);

  std::sort( remote_key_usage.begin(), remote_key_usage.end() );

  m_key_usage.clear();
  m_key_usage.reserve(temp_key.size() + remote_key_usage.size());

  std::merge( temp_key.begin(),
              temp_key.end(),
              remote_key_usage.begin(),
              remote_key_usage.end(),
              std::back_inserter(m_key_usage) );

  // Discard duplicates and verify the sorted & unique invariant:

  m_key_usage.erase( std::unique( m_key_usage.begin(), m_key_usage.end() ),
                     m_key_usage.end() );

  if (!is_sorted_and_unique(m_key_usage)) {
    throw std::runtime_error("Sorted&unique invariant violated!");
  }
}
void DistributedIndex::generate_new_global_key_upper_bound(
  const std::vector<size_t> & requests ,
        std::vector<DistributedIndex::KeyType> & global_key_upper_bound ) const
{
  bool bad_request = m_span_count != requests.size();

  std::ostringstream error_msg ;

  error_msg
    << "sierra::parallel::DistributedIndex::generate_new_keys_global_counts( " ;

  std::vector<unsigned long>
    local_counts(  m_span_count + 1 , (unsigned long) 0 ),
    global_counts( m_span_count + 1 , (unsigned long) 0 );

  // Count unique keys in each span plus the requested new keys.
  // The final entry carries the local error flag so the error check
  // rides along with the reduction.

  local_counts[ m_span_count ] = m_span_count != requests.size();

  if ( m_span_count == requests.size() ) {

    for ( size_t i = 0 ; i < m_span_count ; ++i ) {
      local_counts[i] = requests[i] ;
    }

    std::vector<KeyProc>::const_iterator j = m_key_usage.begin();

    for ( size_t i = 0 ; i < m_span_count && j != m_key_usage.end() ; ++i ) {
      const KeyType key_span_last = m_key_span[i].second ;
      size_t count = 0 ;
      while ( j != m_key_usage.end() && j->first <= key_span_last ) {
        const KeyType key = j->first ;
        while ( j != m_key_usage.end() && key == j->first ) { ++j ; }
        ++count ;
      }
      local_counts[i] += count ;
    }
  }

#if defined( STK_HAS_MPI )
  if (m_comm_size > 1) {
    MPI_Allreduce( (local_counts.empty() ? NULL : & local_counts[0]) ,
                   (global_counts.empty() ? NULL : & global_counts[0]) ,
                   m_span_count + 1 , MPI_UNSIGNED_LONG , MPI_SUM , m_comm );
  }
  else {
    global_counts = local_counts ;
  }
#else
  global_counts = local_counts ;
#endif

  bad_request = global_counts[m_span_count] != 0 ;

  if ( bad_request ) {
    if ( m_span_count != requests.size() ) {
      error_msg << " requests.size() = " << requests.size()
                << " != " << m_span_count << " )" ;
    }
  }

  if ( ! bad_request ) {
    for ( unsigned i = 0 ; i < m_span_count ; ++i ) {
      const size_t span_available =
        ( 1 + m_key_span[i].second - m_key_span[i].first );
      const size_t span_requested = global_counts[i];

      if ( span_available < span_requested ) {
        bad_request = true ;
        error_msg << " global_sum( (existing+request)[" << i << "] ) = "
                  << span_requested
                  << " > global_sum( span_available ) = "
                  << span_available ;
      }
    }
  }

  if ( bad_request ) {
    throw std::runtime_error( error_msg.str() );
  }

  // The upper bound of each span is its first key plus the global count.

  global_key_upper_bound.resize( m_span_count );

  for ( size_t i = 0 ; i < m_span_count ; ++i ) {
    global_key_upper_bound[i] = m_key_span[i].first + global_counts[i] - 1 ;
  }
}
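// Worked example (not part of the original source): if span i is
// [1, 1000000] and the global sum of existing plus requested keys is 5000,
// the upper bound becomes 1 + 5000 - 1 = 5000, so new keys are generated
// only from the front of the span.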
void DistributedIndex::generate_new_keys_local_planning(
  const std::vector<DistributedIndex::KeyType> & key_global_upper_bound ,
  const std::vector<size_t>  & requests_local ,
        std::vector<long>    & new_request ,
        std::vector<KeyType> & requested_keys ,
        std::vector<KeyType> & contrib_keys ) const
{
  new_request.assign( m_span_count , long(0) );

  contrib_keys.clear();

  std::vector<KeyProc>::const_iterator j = m_key_usage.begin();

  for ( size_t i = 0 ; i < m_span_count ; ++i ) {

    const KeyType key_upper_bound = key_global_upper_bound[i] ;

    const size_t init_size = contrib_keys.size();

    // Visit only this process's chunks of the span:
    const size_t chunk_inc   = m_comm_size * DISTRIBUTED_INDEX_CHUNK_SIZE ;
    const size_t chunk_rsize = m_comm_rank * DISTRIBUTED_INDEX_CHUNK_SIZE ;

    for ( KeyType key_begin = m_key_span[i].first + chunk_rsize ;
          key_begin <= key_upper_bound ; key_begin += chunk_inc ) {

      KeyType key_iter = key_begin ;

      const KeyType key_last =
        std::min( key_begin + DISTRIBUTED_INDEX_CHUNK_SIZE - 1 , key_upper_bound );

      j = std::lower_bound( j, m_key_usage.end(), key_iter, KeyProcLess() );

      // Contribute the keys of this chunk that are not already in use:
      for ( ; key_iter <= key_last ; ++key_iter ) {
        if ( j == m_key_usage.end() || key_iter < j->first ) {
          contrib_keys.push_back( key_iter );
        }
        else {
          while ( j != m_key_usage.end() && key_iter == j->first ) {
            ++j ;
          }
        }
      }
    }

    const size_t this_contrib = contrib_keys.size() - init_size ;

    // Keep this span's requested keys from the end of the contribution:
    const size_t keep = std::min( requests_local[i] , this_contrib );

    requested_keys.insert( requested_keys.end() ,
                           contrib_keys.end() - keep ,
                           contrib_keys.end() );

    contrib_keys.erase( contrib_keys.end() - keep ,
                        contrib_keys.end() );

    // Positive: keys still needed from other processes.
    // Negative: surplus keys this process can donate.
    new_request[i] = requests_local[i] - this_contrib ;
  }
}
void DistributedIndex::generate_new_keys_global_planning(
  const std::vector<long> & new_request ,
        std::vector<long> & my_donations ) const
{
  my_donations.assign( m_comm_size * m_span_count , long(0) );

  // Gather each process's per-span surplus (negative) or need (positive):
  std::vector<long> new_request_global( m_comm_size * m_span_count );

#if defined( STK_HAS_MPI )
  if (m_comm_size > 1) {
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1200)
    // Emulate MPI_Allgather with one gather per root for this compiler.
    void * send_buf = const_cast<void*>( (void *)( (new_request.empty() ? NULL : & new_request[0]) ));
    void * recv_buf = (new_request_global.empty() ? NULL : & new_request_global[0]) ;
    for (int root = 0; root < m_comm_size; ++root) {
      MPI_Gather( send_buf , m_span_count , MPI_LONG ,
                  recv_buf , m_span_count , MPI_LONG , root, m_comm );
    }
#else
    void * send_buf = const_cast<void*>( (void *)( (new_request.empty() ? NULL : & new_request[0]) ));
    void * recv_buf = (new_request_global.empty() ? NULL : & new_request_global[0]) ;
    MPI_Allgather( send_buf , m_span_count , MPI_LONG ,
                   recv_buf , m_span_count , MPI_LONG , m_comm );
#endif
  }
  else {
    new_request_global = new_request ;
  }
#else
  new_request_global = new_request ;
#endif

  // Determine which processes this process donates its surplus keys to.

  for ( unsigned i = 0 ; i < m_span_count ; ++i ) {
    if ( new_request[i] < 0 ) { // This process has surplus keys to donate

      long my_total_donate = - new_request[i] ;

      long previous_donate = 0 ;

      // Surplus already covered by lower-ranked donating processes:
      for ( int p = 0 ; p < m_comm_rank ; ++p ) {
        const long new_request_p = new_request_global[ p * m_span_count + i ] ;
        if ( new_request_p < 0 ) {
          previous_donate -= new_request_p ;
        }
      }

      long end_donate = previous_donate + my_total_donate ;

      long previous_receive = 0 ;

      // Hand out the surplus to requesting processes in rank order:
      for ( int p = 0 ; p < m_comm_size && 0 < my_total_donate ; ++p ) {

        const long new_request_p = new_request_global[ p * m_span_count + i ];

        if ( 0 < new_request_p ) { // Process 'p' requests keys

          previous_receive += new_request_p ;

          if ( previous_donate < previous_receive ) {
            const long n = std::min( previous_receive , end_donate )
                           - previous_donate ;

            my_donations[ p * m_span_count + i ] = n ;
            previous_donate += n ;
            my_total_donate -= n ;
          }
        }
      }
    }
  }
}
void DistributedIndex::generate_new_keys(
  const std::vector<size_t>                 & requests ,
        std::vector< std::vector<KeyType> > & requested_keys )
{
  std::vector<KeyType> global_key_upper_bound ;
  std::vector<long>    new_request ;
  std::vector<long>    my_donations ;
  std::vector<KeyType> contrib_keys ;
  std::vector<KeyType> new_keys ;

  // Verify the input and generate the global key upper bound per span;
  // throws a parallel-consistent exception if the input is bad.
  generate_new_global_key_upper_bound( requests , global_key_upper_bound );

  // Plan which currently unused local keys are kept and which are contributed:
  generate_new_keys_local_planning( global_key_upper_bound ,
                                    requests ,
                                    new_request ,
                                    new_keys ,
                                    contrib_keys );

  // Plan which processes receive this process's contributed keys:
  generate_new_keys_global_planning( new_request, my_donations );

  // The upper bound is generous, so trim unneeded contributed keys,
  // iterating backwards so erasures do not disturb earlier spans.
  for ( size_t i = m_span_count ; 0 < i ; ) {
    --i ;
    size_t count = 0 ;
    for ( int p = 0 ; p < m_comm_size ; ++p ) {
      count += my_donations[ p * m_span_count + i ];
    }
    std::vector<KeyType>::iterator j_beg = contrib_keys.begin();
    std::vector<KeyType>::iterator j_end = contrib_keys.end();
    j_beg = std::lower_bound( j_beg , j_end , m_key_span[i].first );
    j_end = std::upper_bound( j_beg , j_end , m_key_span[i].second );
    const size_t n = std::distance( j_beg , j_end );
    if ( count < n ) {
      contrib_keys.erase( j_beg + count , j_end );
    }
  }

  // Record the keys this process keeps for itself:
  m_key_usage.reserve(m_key_usage.size() + new_keys.size());
  for ( std::vector<KeyType>::iterator i = new_keys.begin();
        i != new_keys.end() ; ++i ) {
    m_key_usage.push_back( KeyProc( *i , m_comm_rank ) );
  }

  // Communicate the donated keys:

  CommAll all( m_comm );

  for ( size_t i = 0 ; i < m_span_count ; ++i ) {
    for ( int p = 0 ; p < m_comm_size ; ++p ) {
      const size_t n_to_p = my_donations[ p * m_span_count + i ];
      if ( n_to_p ) {
        all.send_buffer(p).skip<KeyType>( n_to_p );
      }
    }
  }

  all.allocate_buffers( m_comm_size / 4 , false );

  {
    size_t n = 0 ;
    for ( size_t i = 0 ; i < m_span_count ; ++i ) {
      for ( int p = 0 ; p < m_comm_size ; ++p ) {
        const size_t n_to_p = my_donations[ p * m_span_count + i ];
        if ( n_to_p ) {
          all.send_buffer(p).pack<KeyType>( & contrib_keys[n] , n_to_p );
          for ( size_t k = 0 ; k < n_to_p ; ++k , ++n ) {
            m_key_usage.push_back( KeyProc( contrib_keys[n] , p ) );
          }
        }
      }
    }
  }

  std::sort( m_key_usage.begin() , m_key_usage.end() );

  all.communicate();

  // Received donated keys are appended to the locally kept keys:
  unpack_recv_buffer( all, m_comm_size, new_keys);

  stk_classic::util::radix_sort_unsigned((new_keys.empty() ? NULL : &new_keys[0]), new_keys.size());

  // Split the new keys among the spans in the requested counts:
  requested_keys.resize( m_span_count );

  {
    std::vector<KeyType>::iterator i_beg = new_keys.begin();
    for ( size_t i = 0 ; i < m_span_count ; ++i ) {
      std::vector<KeyType>::iterator i_end = i_beg + requests[i] ;
      requested_keys[i].assign( i_beg , i_end );
      i_beg = i_end ;
    }
  }
}
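// Illustrative usage sketch (not part of the original source); the request
// count is made up, there is one entry per key span, and every process must
// call generate_new_keys collectively:
//
//   std::vector<size_t> requests( 1 , 10 ); // ten new keys from span 0
//   std::vector< std::vector<DistributedIndex::KeyType> > new_keys ;
//   index.generate_new_keys( requests , new_keys );
//   // new_keys[0] now holds ten globally unused keys reserved for this
//   // process and recorded in the index.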