Sierra Toolkit  Version of the Day
BulkDataEndSync.cpp
1 /*------------------------------------------------------------------------*/
2 /* Copyright 2010 Sandia Corporation. */
3 /* Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive */
4 /* license for use of this work by or on behalf of the U.S. Government. */
5 /* Export of this program may require a license from the */
6 /* United States Government. */
7 /*------------------------------------------------------------------------*/
8 
13 //----------------------------------------------------------------------
14 
15 #include <stdexcept>
16 #include <iostream>
17 #include <sstream>
18 #include <set>
19 #include <vector>
20 #include <algorithm>
21 
22 #include <stk_util/environment/ReportHandler.hpp>
23 
24 #include <stk_util/parallel/ParallelComm.hpp>
25 #include <stk_util/parallel/ParallelReduce.hpp>
26 
27 #include <stk_mesh/base/BulkData.hpp>
28 #include <stk_mesh/base/MetaData.hpp>
29 #include <stk_mesh/base/Entity.hpp>
30 #include <stk_mesh/base/EntityComm.hpp>
31 #include <stk_mesh/base/Trace.hpp>
32 
33 //----------------------------------------------------------------------
34 
35 namespace stk_classic {
36 namespace mesh {
37 
38 bool comm_mesh_verify_parallel_consistency(
39  BulkData & M , std::ostream & error_log );
40 
41 //----------------------------------------------------------------------
42 
43 unsigned BulkData::determine_new_owner( Entity & entity ) const
44 {
45  // We will decide the new owner by looking at all the processes sharing
46  // this entity. The new owner will be the sharing process with lowest rank.
47 
48  // The local process is a candidate only if the entity is not destroyed.
49  unsigned new_owner =
50  EntityLogDeleted == entity.log_query() ? ~0u : m_parallel_rank ;
51 
52  for ( PairIterEntityComm
53  share = m_entity_comm_map.sharing(entity.key()); ! share.empty() ; ++share ) {
54  if ( share->proc < m_parallel_size &&
55  ( new_owner < share->proc || m_parallel_size <= new_owner ) ) {
56  new_owner = share->proc ;
57  }
58  }
59 
60  return new_owner ;
61 }
62 
63 //----------------------------------------------------------------------
64 
65 namespace {
66 
67 // A method for quickly finding an entity
68 Entity* find_entity(const EntityVector& entities, const EntityKey& key,
69  bool expect_success = false)
70 {
71  EntityVector::const_iterator itr =
72  std::lower_bound(entities.begin(),
73  entities.end(),
74  key,
75  EntityLess());
76  if (itr == entities.end() || (*itr)->key() != key) {
77  ThrowRequireMsg(!expect_success,
78  "Expected to be able to find entity of type: " <<
79  key.type() << " and rank: " << key.rank());
80  return NULL;
81  }
82  return *itr;
83 }
84 
85 struct EntityProcState {
86  EntityProc entity_proc;
88 
89  bool operator<(const EntityProcState& rhs) const
90  {
91  EntityLess el;
92  return el(entity_proc, rhs.entity_proc);
93  }
94 };
95 
96 bool pack_entity_modification( const BulkData & mesh ,
97  const bool pack_shared ,
98  CommAll & comm )
99 {
100  bool flag = false ;
101 
102  const std::vector<Entity*> & entity_comm = mesh.entity_comm();
103 
104  for ( std::vector<Entity*>::const_iterator
105  i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
106 
107  Entity & entity = **i ;
108 
109  if ( entity.log_query() == EntityLogModified ||
110  entity.log_query() == EntityLogDeleted ) {
111 
112  for ( PairIterEntityComm ec = mesh.entity_comm(entity.key()); ! ec.empty() ; ++ec ) {
113  const bool shared = 0 == ec->ghost_id ;
114  if ( pack_shared == shared ) {
115  comm.send_buffer( ec->proc )
116  .pack<EntityKey>( entity.key() )
117  .pack<EntityModificationLog>( entity.log_query() );
118 
119  flag = true ;
120  }
121  }
122  }
123  }
124 
125  return flag ;
126 }
127 
// Exchange modification state for communicated entities.  When 'shared'
// is true the exchange runs over the sharing comm entries, otherwise over
// the ghosting entries.  On return 'data' holds one EntityProcState per
// (entity, remote process) report received, sorted by entity then process.
void communicate_entity_modification( const BulkData & mesh ,
                                      const bool shared ,
                                      std::vector<EntityProcState > & data )
{
  CommAll comm( mesh.parallel() );

  // Sizing send buffers:
  const bool local_mod = pack_entity_modification( mesh , shared , comm );

  // Allocation of send and receive buffers; also performs a global
  // reduction on 'local_mod' so all processes agree on whether any
  // communication is needed at all.
  const bool global_mod =
    comm.allocate_buffers( comm.parallel_size() / 4 , false , local_mod );

  if ( global_mod ) {
    const std::vector<Entity*> & entity_comm = mesh.entity_comm();

    // Packing send buffers (same traversal as the sizing pass above):
    pack_entity_modification( mesh , shared , comm );

    comm.communicate();

    for ( unsigned p = 0 ; p < comm.parallel_size() ; ++p ) {
      CommBuffer & buf = comm.recv_buffer( p );
      EntityKey key ;
      EntityProcState tmp ;

      while ( buf.remaining() ) {

        buf.unpack<EntityKey>( key )
           .unpack<EntityModificationLog>( tmp.state );

        // search through entity_comm, should only receive info on entities
        // that are communicated (expect_success = true aborts otherwise).
        tmp.entity_proc.first = find_entity(entity_comm, key, true);
        tmp.entity_proc.second = p ;

        data.push_back( tmp );
      }
    }
  }

  // Sort so all reports for the same entity are adjacent (callers rely
  // on this to group per-entity processing).
  std::sort( data.begin() , data.end() );
}
171 
172 }
173 
174 //----------------------------------------------------------------------
175 //----------------------------------------------------------------------
176 
177 // Postconditions:
178 // * DistributedIndex is updated based on entity creation/deletions in the
179 // last modification cycle.
180 // * Comm lists for shared entities are up-to-date.
181 // * shared_new contains all entities that were modified/created on a
182 // different process
// Update the DistributedIndex with this cycle's creations/deletions and
// rebuild sharing comm entries for entities that other processes also
// created or modified.
//
// @param shared_new  output: entities that some other process also
//                    created/modified this cycle (appended to).
void BulkData::internal_update_distributed_index(
  std::vector<Entity*> & shared_new )
{
  Trace_("stk_classic::mesh::BulkData::internal_update_distributed_index");

  std::vector< parallel::DistributedIndex::KeyType >
    local_created_or_modified , // only store locally owned/shared entities
    del_entities_keys ;

  // Iterate over all entities known to this process, putting
  // locally deleted entities in del_entities_keys, and putting
  // modified shared/owned entities in local_created_or_modified.
  for ( impl::EntityRepository::iterator
        i = m_entity_repo.begin() ; i != m_entity_repo.end() ; ++i ) {

    Entity & entity = * i->second ;

    if ( EntityLogDeleted == entity.log_query() ) {
      // Has been destroyed
      del_entities_keys.push_back( entity.key().raw_key() );
    }
    else if ( entity.log_query() != EntityLogNoChange &&
              in_owned_closure( entity , m_parallel_rank ) ) {
      // Has been changed and is in owned closure, may be shared
      local_created_or_modified.push_back( entity.key().raw_key() );
    }
  }

  // Update distributed index. Note that the DistributedIndex only
  // tracks ownership and sharing information.
  m_entities_index.update_keys( local_created_or_modified , del_entities_keys );

  if (parallel_size() > 1) {
    // Retrieve data regarding which processes use the local_created_or_modified
    // including this process.
    std::vector< parallel::DistributedIndex::KeyProc >
      global_created_or_modified ;
    m_entities_index.query_to_usage( local_created_or_modified ,
                                     global_created_or_modified );

    //------------------------------
    // Take the usage data and update the sharing comm lists
    {
      Entity * entity = NULL ;

      // Iterate over all global modifications to this entity, this vector is
      // sorted, so we're guaranteed that all modifications to a particular
      // entities will be adjacent in this vector.
      for ( std::vector< parallel::DistributedIndex::KeyProc >::iterator
            i = global_created_or_modified.begin() ;
            i != global_created_or_modified.end() ; ++i ) {

        EntityKey key( & i->first );
        unsigned modifying_proc = i->second;

        // key should not be in del_entities_keys
        ThrowAssertMsg( !std::binary_search(del_entities_keys.begin(),
                                            del_entities_keys.end(),
                                            i->first),
          "Key: " << print_entity_key(mesh_meta_data(), key) <<
          " was locally deleted, but somehow was included in global_created_or_modified; " <<
          " this probably means there's problem in DistributedIndex." );

        if ( m_parallel_rank != modifying_proc ) {
          // Another process also created or updated this entity.

          // Only want to look up entities at most once; entries for the
          // same key are adjacent, so comparing to the previous key works.
          if ( entity == NULL || entity->key() != key ) {
            // Have not looked this entity up by key
            entity = get_entity( key );

            shared_new.push_back( entity );
          }

          // Add the other_process to the entity's sharing info
          // (ghost_id 0 denotes the sharing "ghosting").
          m_entity_comm_map.insert(entity->key(), EntityCommInfo( 0, // sharing
                                                                  modifying_proc ) );
        }
      }
    }
  }
}
265 
266 //----------------------------------------------------------------------
267 //----------------------------------------------------------------------
268 
269 namespace {
270 
271 // Enforce that shared entities must be in the owned closure:
272 
// Recursively destroy 'entity' together with every equal-or-higher-rank
// entity related to it.  Higher-ranking dependents are destroyed first
// (they are popped off the back of the relation list); each dependent is
// required to be outside the owned closure, i.e. a ghost.
void destroy_dependent_ghosts( BulkData & mesh , Entity * entity )
{
  for ( ; ; ) {
    PairIterRelation rel = entity->relations();

    if ( rel.empty() ) { break ; }

    Entity * e = rel.back().entity();

    // Relations are ordered; once a lower-rank (downward) relation is
    // reached there are no more upward dependents to destroy.
    if ( e->entity_rank() < entity->entity_rank() ) { break ; }

    ThrowRequireMsg( !in_owned_closure( *e , mesh.parallel_rank()),
        "Entity " << print_entity_key(e) << " should not be in closure." );

    destroy_dependent_ghosts( mesh , e );
  }

  // All upward dependents gone; now the entity itself can be destroyed.
  mesh.destroy_entity( entity );
}
292 
293 // Entities with sharing information that are not in the owned closure
294 // have been modified such that they are no longer shared.
295 // These may no longer be needed or may become ghost entities.
296 // There is not enough information so assume they are to be deleted
297 // and let these entities be re-ghosted if they are needed.
298 
299 // Open question: Should an owned and shared entity that does not
300 // have an upward relation to an owned entity be destroyed so that
301 // ownership transfers to another process?
302 
303 void resolve_shared_removed_from_owned_closure( BulkData & mesh )
304 {
305  for ( std::vector<Entity*>::const_reverse_iterator
306  i = mesh.entity_comm().rbegin() ;
307  i != mesh.entity_comm().rend() ; ++i) {
308 
309  Entity * entity = *i ;
310 
311  if ( ! mesh.entity_comm_sharing(entity->key()).empty() &&
312  ! in_owned_closure( *entity , mesh.parallel_rank() ) ) {
313 
314  destroy_dependent_ghosts( mesh , entity );
315  }
316  }
317 }
318 
319 }
320 
321 // Resolve modifications for shared entities:
322 // If not locally destroyed and remotely modified
323 // then set to locally modified.
324 // If remotely destroyed then determine the new owner.
325 //
326 // Post condition:
327 // Shared entities are in-sync with respect to modification state.
328 // Shared communication lists are updated to reflect all deletions.
329 // Ownership has been re-assigned as necessary for deletion
330 // of shared entities.
331 
// Resolve modification/deletion of shared entities (see the contract
// comment above).  Requires parallel_size() > 1.
void BulkData::internal_resolve_shared_modify_delete()
{
  Trace_("stk_classic::mesh::BulkData::internal_resolve_shared_modify_delete");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  // First destroy shared entities that dropped out of the owned closure
  // (and their dependent ghosts); they may be re-ghosted later.
  resolve_shared_removed_from_owned_closure( *this );

  std::vector< EntityProcState > remote_mod ;

  // Communicate entity modification state for shared entities
  // the resulting vector is sorted by entity and process.
  const bool communicate_shared = true ;
  communicate_entity_modification( *this , communicate_shared , remote_mod );

  // We iterate backwards over remote_mod to ensure that we hit the
  // higher-ranking entities first.
  for ( std::vector<EntityProcState>::reverse_iterator
        i = remote_mod.rbegin(); i != remote_mod.rend() ; ) {

    Entity * const entity = i->entity_proc.first ;
    const bool locally_destroyed = EntityLogDeleted == entity->log_query();
    bool remote_owner_destroyed = false;

    // Iterate over all of this entity's remote changes (adjacent in the
    // sorted vector); the inner loop advances 'i' for the outer loop.
    for ( ; i != remote_mod.rend() && i->entity_proc.first == entity ; ++i ) {

      const unsigned remote_proc = i->entity_proc.second ;
      const bool remotely_destroyed = EntityLogDeleted == i->state ;

      // When a shared entity is remotely modified or destroyed
      // then the local copy is also modified. This modification
      // status is applied to all related higher ranking entities.

      if ( ! locally_destroyed ) {
        m_entity_repo.log_modified( *entity );
      }

      // A shared entity is being deleted on the remote process.
      // Remove it from the sharing communication list.
      // Ownership changes are processed later, but we'll need
      // to know if the remote owner destroyed the entity in order
      // to correctly resolve ownership (it is not sufficient to just
      // look at the comm list of the entity since there is no
      // guarantee that the comm list is correct or up-to-date).

      if ( remotely_destroyed ) {
        m_entity_comm_map.erase(entity->key(), EntityCommInfo(0,remote_proc) );

        // check if owner is destroying
        if ( entity->owner_rank() == remote_proc ) {
          remote_owner_destroyed = true ;
        }
      }
    }

    // Have now processed all remote changes knowledge for this entity.

    PairIterEntityComm new_sharing = m_entity_comm_map.sharing(entity->key());
    const bool exists_somewhere = ! ( remote_owner_destroyed &&
                                      locally_destroyed &&
                                      new_sharing.empty() );

    // If the entity has been deleted everywhere, nothing left to do
    if ( exists_somewhere ) {

      const bool old_local_owner = m_parallel_rank == entity->owner_rank();

      // Giving away ownership to another process in the sharing list:
      const bool give_ownership = locally_destroyed && old_local_owner ;

      // If we are giving away ownership or the remote owner destroyed
      // the entity, then we need to establish a new owner
      if ( give_ownership || remote_owner_destroyed ) {

        const unsigned new_owner = determine_new_owner( *entity );

        m_entity_repo.set_entity_owner_rank( *entity, new_owner );
        m_entity_repo.set_entity_sync_count( *entity, m_sync_count );
      }

      if ( ! locally_destroyed ) {

        PartVector add_part , remove_part ;

        if ( new_sharing.empty() ) {
          // Is no longer shared, remove the shared part.
          remove_part.push_back(& m_mesh_meta_data.globally_shared_part());
        }

        const bool new_local_owner = m_parallel_rank == entity->owner_rank();

        const bool local_claimed_ownership =
          ( ! old_local_owner && new_local_owner );

        if ( local_claimed_ownership ) {
          // Changing remotely owned to locally owned
          add_part.push_back( & m_mesh_meta_data.locally_owned_part() );
        }

        if ( ! add_part.empty() || ! remove_part.empty() ) {
          internal_change_entity_parts( *entity , add_part , remove_part );
        }
      } // if ( ! locally_destroyed )
    } // if ( exists_somewhere )
  } // remote mod loop

  // Erase all sharing communication lists for Destroyed entities:
  for ( std::vector<Entity*>::const_reverse_iterator
        i = entity_comm().rbegin() ; i != entity_comm().rend() ; ++i) {
    Entity * entity = *i ;

    if ( EntityLogDeleted == entity->log_query() ) {
      // m_ghosting[0] is the SHARED communication
      m_entity_comm_map.erase(entity->key(), *m_ghosting[0] );
    }
  }
}
450 
451 
452 
453 //----------------------------------------------------------------------
454 // Resolve modifications for ghosted entities:
455 // If a ghosted entity is modified or destroyed on the owning
456 // process then the ghosted entity must be destroyed.
457 //
458 // Post condition:
459 // Ghosted entities of modified or deleted entities are destroyed.
460 // Ghosted communication lists are cleared to reflect all deletions.
461 
// Resolve modification/deletion of ghosted entities (see the contract
// comment above).  Requires parallel_size() > 1.
void BulkData::internal_resolve_ghosted_modify_delete()
{
  Trace_("stk_classic::mesh::BulkData::internal_resolve_ghosted_modify_delete");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");
  // Resolve modifications for ghosted entities:

  std::vector<EntityProcState > remote_mod ;

  // Communicate entity modification state for ghost entities
  const bool communicate_shared = false ;
  communicate_entity_modification( *this , communicate_shared , remote_mod );

  const size_t ghosting_count = m_ghosting.size();

  // Per-ghosting flag: set when any comm entry for that ghosting changes
  // locally; globally reduced at the end to bump sync counts.
  std::vector< int > ghosting_change_flags( ghosting_count , 0 );

  // We iterate backwards over remote_mod to ensure that we hit the
  // higher-ranking entities first. This is important because higher-ranking
  // entities like element must be deleted before the nodes they have are
  // deleted.
  for ( std::vector<EntityProcState>::reverse_iterator
        i = remote_mod.rbegin(); i != remote_mod.rend() ; ++i ) {
    Entity * entity = i->entity_proc.first ;
    const unsigned remote_proc = i->entity_proc.second ;
    const bool local_owner = entity->owner_rank() == m_parallel_rank ;
    const bool remotely_destroyed = EntityLogDeleted == i->state ;
    const bool locally_destroyed = EntityLogDeleted == entity->log_query();

    if ( local_owner ) { // Sending to 'remote_proc' for ghosting

      if ( remotely_destroyed ) {

        // remove from ghost-send list

        for ( size_t j = ghosting_count ; j-- ; ) {
          if ( m_entity_comm_map.erase( entity->key(), EntityCommInfo( j , remote_proc ) ) ) {
            ghosting_change_flags[ j ] = true ;
          }
        }
      }

      // Remotely modified ghosts are ignored

    }
    else { // Receiving from 'remote_proc' for ghosting

      // Owner modified or destroyed, must locally destroy.

      for ( PairIterEntityComm ec = m_entity_comm_map.comm(entity->key()) ; ! ec.empty() ; ++ec ) {
        ghosting_change_flags[ ec->ghost_id ] = true ;
      }

      // This is a receive ghost so the only communication information
      // is the ghosting information, can clear it all out.
      m_entity_comm_map.comm_clear(entity->key());

      if ( ! locally_destroyed ) {

        // If mesh modification causes a ghost entity to become
        // a member of an owned-closure then do not automatically
        // destroy it. The new sharing status will be resolved
        // in 'internal_resolve_parallel_create'.

        if ( ! in_owned_closure( *entity , m_parallel_rank ) ) {

          const bool destroy_entity_successful = destroy_entity(entity);
          ThrowRequireMsg(destroy_entity_successful,
              "Could not destroy ghost entity " << print_entity_key(entity));
        }
      }
    }
  } // end loop on remote mod

  // Erase all ghosting communication lists for:
  //   1) Destroyed entities.
  //   2) Owned and modified entities.

  for ( std::vector<Entity*>::const_reverse_iterator
        i = entity_comm().rbegin() ; i != entity_comm().rend() ; ++i) {

    Entity & entity = **i ;

    const bool locally_destroyed = EntityLogDeleted == entity.log_query();
    const bool locally_owned_and_modified =
      EntityLogModified == entity.log_query() &&
      m_parallel_rank   == entity.owner_rank() ;

    if ( locally_destroyed || locally_owned_and_modified ) {

      // m_ghosting[0] is the SHARED communication

      for ( size_t j = ghosting_count ; j-- ; ) {
        if ( m_entity_comm_map.erase( entity.key(), *m_ghosting[j] ) ) {
          ghosting_change_flags[ j ] = true ;
        }
      }
    }
  }

  // Globally combine the per-ghosting change flags so every process
  // agrees on which ghostings changed this cycle.
  std::vector< int > ghosting_change_flags_global( ghosting_count , 0 );

  all_reduce_sum( m_parallel_machine ,
                  & ghosting_change_flags[0] ,
                  & ghosting_change_flags_global[0] ,
                  ghosting_change_flags.size() );

  for ( unsigned ic = 0 ; ic < ghosting_change_flags_global.size() ; ++ic ) {
    if ( ghosting_change_flags_global[ic] ) {
      m_ghosting[ic]->m_sync_count = m_sync_count ;
    }
  }
}
575 
576 //----------------------------------------------------------------------
577 
578 // Postconditions:
579 // * All shared entities have parallel-consistent owner
580 // * Part membership of shared entities is up-to-date
581 // * m_entity_comm is up-to-date
// Resolve parallel creation of entities (see the postconditions comment
// above).  Requires parallel_size() > 1.
void BulkData::internal_resolve_parallel_create()
{
  Trace_("stk_classic::mesh::BulkData::internal_resolve_parallel_create");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  std::vector<Entity*> shared_modified ;

  // Update the parallel index and
  // output shared and modified entities.
  internal_update_distributed_index( shared_modified );

  // ------------------------------------------------------------
  // Claim ownership on all shared_modified entities that I own
  // and which were not created in this modification cycle. All
  // sharing procs will need to be informed of this claim.
  CommAll comm_all( m_parallel_machine );

  // Two-phase pattern: phase 0 packs to size the buffers, phase 1
  // packs again and communicates.
  for ( int phase = 0; phase < 2; ++phase ) {
    for ( std::vector<Entity*>::iterator
          i = shared_modified.begin() ; i != shared_modified.end() ; ++i ) {
      Entity & entity = **i ;
      if ( entity.owner_rank() == m_parallel_rank &&
           entity.log_query() != EntityLogCreated ) {

        for ( PairIterEntityComm
              jc = m_entity_comm_map.sharing(entity.key()) ; ! jc.empty() ; ++jc ) {
          comm_all.send_buffer( jc->proc ) .pack<EntityKey>( entity.key() );
        }
      }
    }

    if (phase == 0) { //allocation phase
      comm_all.allocate_buffers( m_parallel_size / 4 );
    }
    else { // communication phase
      comm_all.communicate();
    }
  }

  // Unpack ownership claims: sender 'p' claims ownership of each key.
  for ( unsigned p = 0 ; p < m_parallel_size ; ++p ) {
    CommBuffer & buf = comm_all.recv_buffer( p );
    EntityKey key ;
    while ( buf.remaining() ) {
      buf.unpack<EntityKey>( key );

      Entity & entity = * get_entity( key );

      // Set owner, will correct part membership later
      m_entity_repo.set_entity_owner_rank( entity, p);
    }
  }

  // ------------------------------------------------------------
  // Update shared created entities.
  // - Revise ownership to selected processor
  // - Update sharing.
  // - Work backward so the 'in_owned_closure' function
  //   can evaluate related higher ranking entities.

  std::ostringstream error_msg ;
  int error_flag = 0 ;

  PartVector shared_part , owned_part ;
  shared_part.push_back( & m_mesh_meta_data.globally_shared_part() );
  owned_part.push_back( & m_mesh_meta_data.locally_owned_part() );

  std::vector<Entity*>::const_reverse_iterator iend = shared_modified.rend();
  for ( std::vector<Entity*>::const_reverse_iterator
        i = shared_modified.rbegin() ; i != iend ; ++i) {

    Entity * entity = *i ;

    if ( entity->owner_rank() == m_parallel_rank &&
         entity->log_query() == EntityLogCreated ) {

      // Created and not claimed by an existing owner

      const unsigned new_owner = determine_new_owner( *entity );

      m_entity_repo.set_entity_owner_rank( *entity, new_owner);
    }

    if ( entity->owner_rank() != m_parallel_rank ) {
      // Do not own it and still have it.
      // Remove the locally owned, add the globally_shared
      m_entity_repo.set_entity_sync_count( *entity, m_sync_count);
      internal_change_entity_parts( *entity , shared_part /*add*/, owned_part /*remove*/);
    }
    else if ( ! m_entity_comm_map.sharing(entity->key()).empty() ) {
      // Own it and has sharing information.
      // Add the globally_shared
      internal_change_entity_parts( *entity , shared_part /*add*/, PartVector() /*remove*/ );
    }
    else {
      // Own it and does not have sharing information.
      // Remove the globally_shared
      internal_change_entity_parts( *entity , PartVector() /*add*/, shared_part /*remove*/);
    }

    // Newly created shared entity had better be in the owned closure
    if ( ! in_owned_closure( *entity , m_parallel_rank ) ) {
      if ( 0 == error_flag ) {
        error_flag = 1 ;
        error_msg
          << "\nP" << m_parallel_rank << ": " << " FAILED\n"
          << "  The following entities were declared on multiple processors,\n"
          << "  cannot be parallel-shared, and were declared with"
          << "  parallel-ghosting information. {\n";
      }
      error_msg << "    " << print_entity_key(entity);
      error_msg << " also declared on" ;
      for ( PairIterEntityComm ec = entity->sharing(); ! ec.empty() ; ++ec ) {
        error_msg << " P" << ec->proc ;
      }
      error_msg << "\n" ;
    }
  }

  // Parallel-consistent error checking of above loop
  if ( error_flag ) { error_msg << "}\n" ; }
  all_reduce( m_parallel_machine , ReduceMax<1>( & error_flag ) );
  ThrowErrorMsgIf( error_flag, error_msg.str() );

  // ------------------------------------------------------------
  // Update m_entity_comm based on shared_modified: append the new
  // entries, merge into sorted order, and drop duplicates.

  const size_t n_old = m_entity_comm.size();

  m_entity_comm.insert( m_entity_comm.end() ,
                        shared_modified.begin() , shared_modified.end() );

  std::inplace_merge( m_entity_comm.begin() ,
                      m_entity_comm.begin() + n_old ,
                      m_entity_comm.end() ,
                      EntityLess() );

  {
    std::vector<Entity*>::iterator i =
      std::unique( m_entity_comm.begin() , m_entity_comm.end() );

    m_entity_comm.erase( i , m_entity_comm.end() );
  }
}
726 
727 //----------------------------------------------------------------------
728 
730 {
731  Trace_("stk_classic::mesh::BulkData::modification_end");
732 
733  return internal_modification_end( true );
734 }
735 
736 #if 0
737 
738 namespace {
739 
740 // Very, very handy for debugging parallel resolution...
741 
// Debug helper (compiled out under '#if 0'): dump every communicated
// entity with its owner, modification state, and (ghost_id, proc) comm
// entries to stdout.  'doit' gates the output so call sites can stay in
// place while disabled.
void print_comm_list( const BulkData & mesh , bool doit )
{
  if ( doit ) {
    std::ostringstream msg ;

    msg << std::endl ;

    for ( std::vector<Entity*>::const_iterator
          i =  mesh.entity_comm().begin() ;
          i != mesh.entity_comm().end() ; ++i ) {

      Entity & entity = **i ;
      msg << "P" << mesh.parallel_rank() << ": " ;

      print_entity_key( msg , MetaData::get(mesh) , entity.key() );

      msg << " owner(" << entity.owner_rank() << ")" ;

      // One-word modification status: mod / del / (blank padding)
      if ( EntityLogModified == entity.log_query() ) { msg << " mod" ; }
      else if ( EntityLogDeleted == entity.log_query() ) { msg << " del" ; }
      else { msg << "    " ; }

      for ( PairIterEntityComm ec = mesh.entity_comm(entity.key()); ! ec.empty() ; ++ec ) {
        msg << " (" << ec->ghost_id << "," << ec->proc << ")" ;
      }
      msg << std::endl ;
    }

    std::cout << msg.str();
  }
}
773 
774 }
775 
776 #endif
777 
// Complete a modification cycle: resolve shared/ghosted modifications,
// resolve parallel creation, optionally regenerate the ghosting aura,
// verify parallel consistency, sort buckets, and mark the mesh
// SYNCHRONIZED.  Returns false when the mesh was already synchronized
// (no-op), true otherwise.
bool BulkData::internal_modification_end( bool regenerate_aura )
{
  Trace_("stk_classic::mesh::BulkData::internal_modification_end");

  if ( m_sync_state == SYNCHRONIZED ) { return false ; }

  if (parallel_size() > 1) {
    // Resolve modification or deletion of shared entities
    // which can cause deletion of ghost entities.
    internal_resolve_shared_modify_delete();

    // Resolve modification or deletion of ghost entities
    // by destroying ghost entities that have been touched.
    internal_resolve_ghosted_modify_delete();

    // Resolution of shared and ghost modifications can empty
    // the communication information for entities.
    // If there is no communication information then the
    // entity must be removed from the communication list.
    {
      std::vector<Entity*>::iterator i = m_entity_comm.begin();
      bool changed = false ;
      // NULL-out entries with empty comm info, then compact the vector.
      for ( ; i != m_entity_comm.end() ; ++i ) {
        if ( m_entity_comm_map.comm((*i)->key()).empty() ) { *i = NULL ; changed = true ; }
      }
      if ( changed ) {
        i = std::remove( m_entity_comm.begin() ,
                         m_entity_comm.end() , (Entity *) NULL );
        m_entity_comm.erase( i , m_entity_comm.end() );
      }
    }

    // Resolve creation of entities: discover sharing and set unique ownership.
    internal_resolve_parallel_create();

    // Resolve part membership for shared entities.
    // This occurs after resolving creation so created and shared
    // entities are resolved along with previously existing shared entities.
    internal_resolve_shared_membership();

    // Regenerate the ghosting aura around all shared mesh entities.
    if ( regenerate_aura ) { internal_regenerate_shared_aura(); }

    // ------------------------------
    // Verify parallel consistency of mesh entities.
    // Unique ownership, communication lists, sharing part membership,
    // application part membership consistency.
    std::ostringstream msg ;
    bool is_consistent = true;
    is_consistent = comm_mesh_verify_parallel_consistency( *this , msg );
    ThrowErrorMsgIf( !is_consistent, msg.str() );
  }
  else {
    // Serial case: still refresh the distributed index bookkeeping.
    std::vector<Entity*> shared_modified ;
    internal_update_distributed_index( shared_modified );
  }

  // ------------------------------
  // The very last operation performed is to sort the bucket entities.
  // This does not change the entities, relations, or field data.
  // However, it insures that the ordering of entities and buckets
  // is independent of the order in which a set of changes were
  // performed.
  //
  //optimize_buckets combines multiple buckets in a bucket-family into
  //a single larger bucket, and also does a sort.
  //If optimize_buckets has not been requested, still do the sort.
  if (m_optimize_buckets) m_bucket_repository.optimize_buckets();
  else m_bucket_repository.internal_sort_bucket_entities();

  // ------------------------------

  m_sync_state = SYNCHRONIZED ;

  return true ;
}
854 
855 //----------------------------------------------------------------------
856 //----------------------------------------------------------------------
857 
// Fixed part ordinals assumed for the meta data's predefined parts:
// universal (0), locally-owned (1), globally-shared (2).  Used below to
// skip these parts when packing part memberships.
enum { PART_ORD_UNIVERSAL = 0 };
enum { PART_ORD_OWNED = 1 };
enum { PART_ORD_SHARED = 2 };
861 
862 namespace {
863 
864 void pack_induced_memberships( CommAll & comm ,
865  const std::vector<Entity*> & entity_comm )
866 {
867  for ( std::vector<Entity*>::const_iterator
868  i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
869 
870  Entity & entity = **i ;
871 
872  if ( in_shared( entity , entity.owner_rank() ) ) {
873  // Is shared with owner, send to owner.
874 
875  OrdinalVector empty , induced ;
876 
877  induced_part_membership( entity , empty , induced );
878 
879  CommBuffer & buf = comm.send_buffer( entity.owner_rank() );
880 
881  unsigned tmp = induced.size();
882 
883  buf.pack<unsigned>( tmp );
884 
885  for ( OrdinalVector::iterator
886  j = induced.begin() ; j != induced.end() ; ++j ) {
887  buf.pack<unsigned>( *j );
888  }
889  }
890  }
891 }
892 
893 void generate_send_list( const size_t sync_count ,
894  const unsigned p_rank ,
895  const std::vector<Entity*> & entity_comm ,
896  std::vector<EntityProc> & send_list )
897 {
898  for ( std::vector<Entity*>::const_iterator
899  i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
900 
901  Entity & entity = **i ;
902 
903  if ( entity.owner_rank() == p_rank &&
904  entity.synchronized_count() == sync_count ) {
905 
906  for ( PairIterEntityComm ec = entity.comm() ; ! ec.empty() ; ++ec ) {
907  EntityProc tmp( & entity , ec->proc );
908  send_list.push_back( tmp );
909  }
910  }
911  }
912 
913  {
914  std::sort( send_list.begin() , send_list.end() , EntityLess() );
915  std::vector<EntityProc>::iterator i =
916  std::unique( send_list.begin() , send_list.end() );
917  send_list.erase( i , send_list.end() );
918  }
919 }
920 
// For each (entity, proc) in 'send_list', pack the entity key, the number
// of non-implicit part ordinals, and those ordinals.  The sender is the
// owner, so the leading universal/owned (and possibly shared) ordinals
// are skipped rather than sent.
void pack_part_memberships( CommAll & comm ,
                            const std::vector<EntityProc> & send_list )
{
  for ( std::vector<EntityProc>::const_iterator
        i = send_list.begin() ; i != send_list.end() ; ++i ) {

    Entity & entity = * i->first ;

    std::pair<const unsigned *, const unsigned *>
      part_ord = entity.bucket().superset_part_ordinals();

    // I am the owner; therefore, the first three members are
    // universal, uses, and owns. Don't send them.

    // I am the owner.  The first two memberships are
    // universal_part and locally_owned_part.  The third
    // membership may be globally_shared_part ;

    const unsigned count_all  = part_ord.second - part_ord.first ;
    // Skip 3 ordinals when the third is PART_ORD_SHARED, else skip 2.
    const unsigned count_skip =
      ( 2 < count_all && part_ord.first[2] == PART_ORD_SHARED ) ? 3 : 2 ;

    const unsigned count_send = count_all - count_skip ;

    const unsigned * const start_send = part_ord.first + count_skip ;

    comm.send_buffer( i->second ).pack<EntityKey>( entity.key() )
                                 .pack<unsigned>( count_send )
                                 .pack<unsigned>( start_send , count_send );
  }
}
952 
953 }
954 
// Mesh entity membership changes must be synchronized among
// processes that share mesh entities and propagated to
// processes that ghost copies of the mesh entities.
//
// Precondition: correct shared and ghosting lists.
//
// Part memberships may have been added or removed
// either explicitly or indirectly via entity relationships
// being added or removed.
//
// Two communication phases:
//   1) sharing processes -> owners : induced memberships are merged
//      on the owner, and stale induced memberships removed there;
//   2) owners -> all comm processes : the owner's now-authoritative
//      membership list is broadcast and applied on every copy.

void BulkData::internal_resolve_shared_membership()
{
  Trace_("stk_classic::mesh::BulkData::internal_resolve_shared_membership");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  const MetaData & meta = m_mesh_meta_data ;
  ParallelMachine p_comm = m_parallel_machine ;
  const unsigned p_rank = m_parallel_rank ;
  const unsigned p_size = m_parallel_size ;
  const PartVector & all_parts = meta.get_parts();

  const Part & part_universal = meta.universal_part();
  const Part & part_owned = meta.locally_owned_part();
  const Part & part_shared = meta.globally_shared_part();

  // Quick verification of part ordinal assumptions
  // (the PART_ORD_* constants are relied upon below when deciding
  //  which memberships are implicit and which may be removed).

  ThrowRequireMsg(PART_ORD_UNIVERSAL == part_universal.mesh_meta_data_ordinal(),
                  "Universal part ordinal is wrong, expected "
                  << PART_ORD_UNIVERSAL << ", got: "
                  << part_universal.mesh_meta_data_ordinal());

  ThrowRequireMsg(PART_ORD_OWNED == part_owned.mesh_meta_data_ordinal(),
                  "Owned part ordinal is wrong, expected "
                  << PART_ORD_OWNED << ", got: "
                  << part_owned.mesh_meta_data_ordinal());

  ThrowRequireMsg(PART_ORD_SHARED == part_shared.mesh_meta_data_ordinal(),
                  "Shared part ordinal is wrong, expected "
                  << PART_ORD_SHARED << ", got: "
                  << part_shared.mesh_meta_data_ordinal());

  // Shared entities may have been modified due to relationship changes.
  // Send just the current induced memberships from the sharing to
  // the owning processes.
  {
    CommAll comm( p_comm );

    // First pass: pack only to size the send buffers.
    pack_induced_memberships( comm , m_entity_comm );

    // Allocate buffers; the argument presumably selects the sparse-vs-
    // dense communication strategy threshold — see CommAll::allocate_buffers.
    comm.allocate_buffers( p_size / 4 );

    // Second pass: pack identical content into the allocated buffers.
    pack_induced_memberships( comm , m_entity_comm );

    comm.communicate();

    for ( std::vector<Entity*>::iterator
          i = m_entity_comm.begin() ; i != m_entity_comm.end() ; ++i ) {

      Entity & entity = **i ;

      if ( entity.owner_rank() == p_rank ) {
        // Receiving from all sharing processes

        OrdinalVector empty , induced_parts , current_parts , remove_parts ;

        // Start with this process' own induced memberships ...
        induced_part_membership( entity , empty , induced_parts );

        // ... then merge in the memberships reported by each sharer.
        // Unpack order must match pack_induced_memberships exactly:
        // a count followed by that many part ordinals.
        for ( PairIterEntityComm
              ec = entity.sharing() ; ! ec.empty() ; ++ec ) {

          CommBuffer & buf = comm.recv_buffer( ec->proc );

          unsigned count = 0 ; buf.unpack<unsigned>( count );
          for ( unsigned j = 0 ; j < count ; ++j ) {
            unsigned part_ord = 0 ; buf.unpack<unsigned>( part_ord );
            insert_ordinal( induced_parts , part_ord );
          }
        }

        // Remove any part that is an induced part but is not
        // in the induced parts list.

        entity.bucket().supersets( current_parts );

        OrdinalVector::const_iterator induced_parts_begin = induced_parts.begin(),
                                      induced_parts_end = induced_parts.end();

        for ( OrdinalVector::iterator
              p = current_parts.begin() ; p != current_parts.end() ; ++p ) {
          if ( membership_is_induced( *meta.get_parts()[*p] , entity.entity_rank() ) &&
               ! contains_ordinal( induced_parts_begin, induced_parts_end , *p ) ) {
            remove_parts.push_back( *p );
          }
        }

        internal_change_entity_parts( entity, induced_parts, remove_parts );
      }
    }
  }

  //------------------------------
  // The owners have complete knowledge of memberships.
  // Send membership information to sync the shared and ghosted copies.
  // Only need to do this for entities that have actually changed.

  {
    std::vector<EntityProc> send_list ;

    generate_send_list( m_sync_count, p_rank, m_entity_comm, send_list);

    CommAll comm( p_comm );

    // Same two-pass size/fill protocol as phase one.
    pack_part_memberships( comm , send_list );

    comm.allocate_buffers( p_size / 4 );

    pack_part_memberships( comm , send_list );

    comm.communicate();

    for ( unsigned p = 0 ; p < p_size ; ++p ) {
      CommBuffer & buf = comm.recv_buffer( p );
      while ( buf.remaining() ) {

        PartVector owner_parts , current_parts , remove_parts ;

        // Unpack order must match pack_part_memberships:
        // entity key, ordinal count, then the ordinals themselves.
        EntityKey key ; buf.unpack<EntityKey>( key );
        unsigned count = 0 ; buf.unpack<unsigned>( count );
        for ( unsigned j = 0 ; j < count ; ++j ) {
          unsigned part_ord = 0 ; buf.unpack<unsigned>( part_ord );
          insert( owner_parts , * all_parts[ part_ord ] );
        }

        // Any current part that is not a member of owners_parts
        // must be removed.  The universal/owned/shared predefined
        // parts were deliberately not packed by the owner and are
        // exempted from removal here.

        Entity * const entity = find_entity(m_entity_comm, key, true);

        entity->bucket().supersets( current_parts );

        for ( PartVector::iterator
              ip = current_parts.begin() ; ip != current_parts.end() ; ++ip ) {
          Part * const part = *ip ;
          const unsigned part_ord = part->mesh_meta_data_ordinal();
          if ( PART_ORD_UNIVERSAL != part_ord &&
               PART_ORD_OWNED != part_ord &&
               PART_ORD_SHARED != part_ord &&
               ! contain( owner_parts , *part ) ) {
            remove_parts.push_back( part );
          }
        }

        internal_change_entity_parts( *entity , owner_parts , remove_parts );
      }
    }
  }
}
1114 
1115 } // namespace mesh
1116 } // namespace stk_classic
1117 
PairIterEntityComm comm() const
Complete communication list for this entity.
Definition: Entity.hpp:181
size_t synchronized_count() const
The mesh bulk data synchronized_count when this entity's part membership was most recently modified...
Definition: Entity.hpp:154
const std::vector< Entity * > & entity_comm() const
All entities with communication information.
Definition: BulkData.hpp:367
void remove(PartVector &v, Part &part)
Remove a part from a properly ordered collection of parts.
Definition: Part.cpp:98
bool membership_is_induced(const Part &part, unsigned entity_rank)
Query if a member entity of the given entity type has an induced membership.
Definition: Relation.cpp:194
void all_reduce_sum(ParallelMachine comm, const double *local, double *global, unsigned count)
Parallel summation to all processors.
void induced_part_membership(Part &part, unsigned entity_rank_from, unsigned entity_rank_to, RelationIdentifier relation_identifier, OrdinalVector &induced_parts, bool include_supersets)
Induce entities' part membership based upon relationships between entities. Insert the result into 'i...
Definition: Relation.cpp:211
Bucket & bucket() const
The bucket which holds this mesh entity's field data.
Definition: Entity.hpp:141
const EntityKey & key() const
The globally unique key ( entity type + identifier ) of this entity.
Definition: Entity.hpp:138
std::pair< const unsigned *, const unsigned * > superset_part_ordinals() const
Definition: Bucket.hpp:188
Entity * get_entity(EntityRank entity_rank, EntityId entity_id) const
Get entity with a given key.
Definition: BulkData.hpp:211
std::pair< Entity *, unsigned > EntityProc
Pairing of an entity with a processor rank.
Definition: Types.hpp:111
EntityModificationLog log_query() const
Query the current state of the entity log.
Definition: Entity.hpp:125
bool contain(const PartVector &v, const Part &part)
Query containment within properly ordered PartVector.
Definition: Part.cpp:108
bool modification_end()
Parallel synchronization of modifications and transition to the guaranteed parallel consistent state...
unsigned parallel_size() const
Size of the parallel machine.
Definition: BulkData.hpp:82
PairIterRelation relations() const
All Entity relations for which this entity is a member. The relations are ordered from lowest entity-...
Definition: Entity.hpp:161
Manager for an integrated collection of entities, entity relations, and buckets of field data...
Definition: BulkData.hpp:49
const MetaData & mesh_meta_data() const
The meta data manager for this bulk data manager.
Definition: BulkData.hpp:76
A fundamental unit within the discretization of a problem domain, including but not limited to nodes...
Definition: Entity.hpp:120
Sierra Toolkit.
MPI_Comm ParallelMachine
Definition: Parallel.hpp:32
unsigned parallel_rank() const
Rank of the parallel machine&#39;s local processor.
Definition: BulkData.hpp:85
EntityRank entity_rank() const
The rank of this entity.
Definition: Entity.hpp:128
std::vector< Part *> PartVector
Collections of parts are frequently maintained as a vector of Part pointers.
Definition: Types.hpp:31
PairIter< std::vector< EntityCommInfo >::const_iterator > PairIterEntityComm
Span of ( communication-subset-ordinal , process-rank ) pairs for the communication of an entity...
Definition: Types.hpp:128
bool destroy_entity(Entity *&entity)
Request the destruction an entity on the local process.
Definition: BulkData.cpp:698
unsigned owner_rank() const
Parallel processor rank of the processor which owns this entity.
Definition: Entity.hpp:175
bool insert(PartVector &v, Part &part)
Insert a part into a properly ordered collection of parts. Returns true if this is a new insertion...
Definition: Part.cpp:85
eastl::iterator_traits< InputIterator >::difference_type count(InputIterator first, InputIterator last, const T &value)