Sierra Toolkit  Version of the Day
BulkDataGhosting.cpp
1 /*------------------------------------------------------------------------*/
2 /* Copyright 2010 Sandia Corporation. */
3 /* Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive */
4 /* license for use of this work by or on behalf of the U.S. Government. */
5 /* Export of this program may require a license from the */
6 /* United States Government. */
7 /*------------------------------------------------------------------------*/
8 
13 #include <cstring>
14 #include <set>
15 #include <stdexcept>
16 #include <iostream>
17 #include <sstream>
18 #include <algorithm>
19 
20 #include <stk_util/parallel/ParallelComm.hpp>
21 #include <stk_util/parallel/ParallelReduce.hpp>
22 
23 #include <stk_mesh/base/Ghosting.hpp>
24 #include <stk_mesh/base/BulkData.hpp>
25 #include <stk_mesh/base/MetaData.hpp>
26 #include <stk_mesh/base/FieldData.hpp>
27 #include <stk_mesh/base/EntityComm.hpp>
28 #include <stk_mesh/base/Comm.hpp>
29 #include <stk_mesh/base/Trace.hpp>
30 
31 using std::strcmp;
32 
33 namespace stk_classic {
34 namespace mesh {
35 
36 //----------------------------------------------------------------------
37 
38 Ghosting & BulkData::create_ghosting( const std::string & name )
39 {
40  Trace_("stk_classic::mesh::BulkData::create_ghosting");
41 
42  require_ok_to_modify();
43 
44  // Verify name is the same on all processors,
45  // if not then throw an exception on all processors.
46  if (parallel_size() > 1) {
47  CommBroadcast bc( parallel() , 0 );
48 
49  if ( bc.parallel_rank() == 0 ) {
50  bc.send_buffer().skip<char>( name.size() + 1 );
51  }
52 
53  bc.allocate_buffer();
54 
55  if ( bc.parallel_rank() == 0 ) {
56  bc.send_buffer().pack<char>( name.c_str() , name.size() + 1 );
57  }
58 
59  bc.communicate();
60 
61  const char * const bc_name =
62  reinterpret_cast<const char *>( bc.recv_buffer().buffer() );
63 
64  int error = 0 != strcmp( bc_name , name.c_str() );
65 
66  all_reduce( parallel() , ReduceMax<1>( & error ) );
67 
68  ThrowErrorMsgIf( error, "Parallel name inconsistency");
69  }
70 
71  Ghosting * const g =
72  new Ghosting( *this , name , m_ghosting.size() , m_sync_count );
73 
74  m_ghosting.push_back( g );
75 
76  return *g ;
77 }
78 
79 //----------------------------------------------------------------------
80 //----------------------------------------------------------------------
81 
namespace {

// Recursively insert 'entry' and the downward (lower-rank) closure of
// its relations into 'new_send', skipping entities the destination
// processor already owns or shares.
void insert_transitive_closure( std::set<EntityProc,EntityLess> & new_send ,
                                const EntityProc & entry );

// Build 'new_send' from 'new_recv': each receiving processor informs
// each entity's owner of the ghosts it expects to keep receiving.
void comm_recv_to_send(
  BulkData & mesh ,
  const std::set< Entity * , EntityLess > & new_recv ,
        std::set< EntityProc , EntityLess > & new_send );

// Make 'new_send' and 'new_recv' parallel-consistent: third-party
// ghosting requests are forwarded to the owning processor, and ghosts
// this processor will receive are recorded in 'new_recv'.
void comm_sync_send_recv(
  BulkData & mesh ,
  std::set< EntityProc , EntityLess > & new_send ,
  std::set< Entity * , EntityLess > & new_recv );

} // namespace <>
98 
99 //----------------------------------------------------------------------
100 //----------------------------------------------------------------------
101 
103 {
104  Trace_("stk_classic::mesh::BulkData::destroy_all_ghosting");
105 
106  require_ok_to_modify();
107 
108  // Clear Ghosting data
109 
110  for ( std::vector<Ghosting*>::iterator
111  ig = m_ghosting.begin() ; ig != m_ghosting.end() ; ++ig ) {
112  Ghosting & gh = **ig ;
113  gh.m_sync_count = m_sync_count ;
114  }
115 
116  // Iterate backwards so as not to invalidate a closure.
117 
118  std::vector<Entity*>::iterator ie = m_entity_comm.end();
119 
120  while ( ie != m_entity_comm.begin() ) {
121 
122  Entity * entity = *--ie ;
123 
124  if ( in_receive_ghost( *entity ) ) {
125  m_entity_comm_map.comm_clear(entity->key());
126  destroy_entity( entity );
127  *ie = NULL ;
128  }
129  else {
130  m_entity_comm_map.comm_clear_ghosting(entity->key());
131  if ( m_entity_comm_map.comm(entity->key()).empty() ) {
132  *ie = NULL ;
133  }
134  }
135  }
136 
137  ie = std::remove( m_entity_comm.begin() ,
138  m_entity_comm.end() , (Entity*) NULL );
139 
140  m_entity_comm.erase( ie , m_entity_comm.end() );
141 }
142 
143 //----------------------------------------------------------------------
144 
146  Ghosting & ghosts ,
147  const std::vector<EntityProc> & add_send ,
148  const std::vector<Entity*> & remove_receive )
149 {
150  Trace_("stk_classic::mesh::BulkData::change_ghosting");
151 
152  //----------------------------------------
153  // Verify inputs:
154 
155  require_ok_to_modify();
156 
157  const bool ok_mesh = & BulkData::get(ghosts) == this ;
158  const bool ok_ghost = 1 < ghosts.ordinal();
159  bool ok_add = true ;
160  bool ok_remove = true ;
161 
162  // Verify all 'add' are locally owned.
163 
164  for ( std::vector<EntityProc>::const_iterator
165  i = add_send.begin() ; ok_add && i != add_send.end() ; ++i ) {
166  ok_add = i->first->owner_rank() == parallel_rank();
167  }
168 
169  // Verify all 'remove' are members of the ghosting.
170 
171  for ( std::vector<Entity*>::const_iterator
172  i = remove_receive.begin() ;
173  ok_remove && i != remove_receive.end() ; ++i ) {
174  ok_remove = in_receive_ghost( ghosts , **i );
175  }
176 
177  int ok = ok_mesh && ok_ghost && ok_add && ok_remove ;
178 
179  all_reduce( parallel() , ReduceMin<1>( & ok ) );
180 
181  if ( 0 == ok ) {
182  std::ostringstream msg ;
183  msg << "For ghosts " << ghosts.name() << ", " ;
184  if ( ! ok_mesh ) { msg << " : Mesh does not own this ghosting" ; }
185  if ( ! ok_ghost ) { msg << " : Cannot modify this ghosting" ; }
186  if ( ! ok_add ) {
187  msg << " : Not owned add {" ;
188  for ( std::vector<EntityProc>::const_iterator
189  i = add_send.begin() ; i != add_send.end() ; ++i ) {
190  if ( i->first->owner_rank() != parallel_rank() ) {
191  msg << " " << print_entity_key( i->first );
192  }
193  }
194  msg << " }" ;
195  }
196  if ( ! ok_remove ) {
197  msg << " : Not in ghost receive {" ;
198  for ( std::vector<Entity*>::const_iterator
199  i = remove_receive.begin() ; i != remove_receive.end() ; ++i ) {
200  if ( ! in_receive_ghost( ghosts , **i ) ) {
201  msg << " " << print_entity_key( *i );
202  }
203  }
204  }
205 
206  ThrowErrorMsg( msg.str() );
207  }
208  //----------------------------------------
209  // Change the ghosting:
210 
211  internal_change_ghosting( ghosts , add_send , remove_receive );
212 }
213 
214 //----------------------------------------------------------------------
215 
// Rebuild 'ghosts' so that afterwards its send list equals the closure
// of (current sends - removals + add_send) and its receive list matches.
// Stale ghost entities are deleted; newly ghosted entities are packed,
// communicated, and unpacked in entity-rank order.
void BulkData::internal_change_ghosting(
  Ghosting & ghosts ,
  const std::vector<EntityProc> & add_send ,
  const std::vector<Entity*> & remove_receive )
{
  Trace_("stk_classic::mesh::BulkData::internal_change_ghosting");

  const MetaData & meta = m_mesh_meta_data ;
  const unsigned rank_count = meta.entity_rank_count();
  const unsigned p_size = m_parallel_size ;

  //------------------------------------
  // Copy ghosting lists into more efficiently editted container.
  // The send and receive lists must be in entity rank-order.

  std::set< EntityProc , EntityLess > new_send ;
  std::set< Entity * , EntityLess > new_recv ;

  //------------------------------------
  // Insert the current ghost receives and then remove from that list.

  // This if-check is an optimization; if remove_receive is m_entity_comm,
  // then we are removing all ghosting information and new_recv should
  // be left empty.
  if ( & entity_comm() != & remove_receive ) {

    // Iterate over all entities with communication information, adding
    // the entity if it's a ghost on this process. new_recv will contain
    // all ghosts on this process by the end of the loop.
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm().begin() ; i != entity_comm().end() ; ++i ) {
      Entity * const entity = *i ;
      if ( in_receive_ghost( ghosts , *entity ) ) {
        new_recv.insert( entity );
      }
    }

    // Remove any entities that are in the remove list.

    for ( std::vector< Entity * >::const_iterator
          i = remove_receive.begin() ; i != remove_receive.end() ; ++i ) {
      new_recv.erase( *i );
    }

    // Keep the closure of the remaining received ghosts.
    // Working from highest-to-lowest key (rank entity type)
    // results in insertion of the closure because
    // inserted entities will get looped over after they are inserted.

    // Insertion will not invalidate the associative container's iterator.

    for ( std::set< Entity * , EntityLess >::reverse_iterator
          i = new_recv.rbegin() ; i != new_recv.rend() ; ++i) {
      const unsigned erank = (*i)->entity_rank();

      // Only lower-ranking related entities belong to the closure.
      for ( PairIterRelation
            irel = (*i)->relations(); ! irel.empty() ; ++irel ) {
        if ( irel->entity_rank() < erank &&
             in_receive_ghost( ghosts , * irel->entity() ) ) {
          new_recv.insert( irel->entity() );
        }
      }
    }
  }

  // Initialize the new_send from the new_recv
  comm_recv_to_send( *this , new_recv , new_send );

  //------------------------------------
  // Add the specified entities and their closure to the send ghosting

  for ( std::vector< EntityProc >::const_iterator
        i = add_send.begin() ; i != add_send.end() ; ++i ) {
    insert_transitive_closure( new_send , *i );
  }

  // Synchronize the send and receive list.
  // If the send list contains a not-owned entity
  // inform the owner and receiver to add that entity
  // to their ghost send and receive lists.

  comm_sync_send_recv( *this , new_send , new_recv );

  // The new_send list is now parallel complete and parallel accurate
  // The new_recv has those ghost entities that are to be kept.
  //------------------------------------
  // Remove the ghost entities that will not remain.
  // If the last reference to the receive ghost entity then delete it.

  bool removed = false ;

  // Reverse iteration: higher-ranking entities are processed (and
  // possibly destroyed) before the lower-ranking ones they depend on.
  for ( std::vector<Entity*>::reverse_iterator
        i = m_entity_comm.rbegin() ; i != m_entity_comm.rend() ; ++i) {
    Entity * entity = *i ;

    const bool is_owner = entity->owner_rank() == m_parallel_rank ;
    const bool remove_recv = ( ! is_owner ) &&
                             0 == new_recv.count( entity );

    if ( is_owner ) {
      // Is owner, potentially removing ghost-sends
      // Have to make a copy

      std::vector<EntityCommInfo> comm_ghost ;
      const PairIterEntityComm ec = m_entity_comm_map.comm(entity->key(),ghosts);
      comm_ghost.assign( ec.first , ec.second );

      // Erase every ghost-send that is not in the new send list.
      for ( ; ! comm_ghost.empty() ; comm_ghost.pop_back() ) {
        const EntityCommInfo tmp = comm_ghost.back();

        if ( 0 == new_send.count( EntityProc( entity , tmp.proc ) ) ) {
          m_entity_comm_map.erase(entity->key(),tmp);
        }
      }
    }
    else if ( remove_recv ) {
      m_entity_comm_map.erase(entity->key(),ghosts);
    }

    if ( m_entity_comm_map.comm(entity->key()).empty() ) {
      removed = true ;
      *i = NULL ; // No longer communicated
      if ( remove_recv ) {
        ThrowRequireMsg( destroy_entity( entity ),
                         " FAILED attempt to destroy entity: " << print_entity_key(entity) );
      }
    }
  }

  if ( removed ) {
    // Compact the communication list, erasing the NULL-ed entries.
    std::vector<Entity*>::iterator i =
      std::remove( m_entity_comm.begin() ,
                   m_entity_comm.end() , (Entity*) NULL );
    m_entity_comm.erase( i , m_entity_comm.end() );
  }

  //------------------------------------
  // Push newly ghosted entities to the receivers and update the comm list.
  // Unpacking must proceed in entity-rank order so that higher ranking
  // entities that have relations to lower ranking entities will have
  // the lower ranking entities unpacked first. The higher and lower
  // ranking entities may be owned by different processes,
  // as such unpacking must be performed in rank order.

  {
    const size_t entity_comm_size = m_entity_comm.size();

    CommAll comm( m_parallel_machine );

    // Sizing pass: pack (into sizing buffers) every entity that is in
    // the new send list but not already ghosted to the destination.
    for ( std::set< EntityProc , EntityLess >::iterator
          j = new_send.begin(); j != new_send.end() ; ++j ) {

      Entity & entity = * j->first ;
      const unsigned int proc = j->second ;

      if ( ! in_ghost( ghosts , entity , proc ) ) {
        // Not already being sent , must send it.
        CommBuffer & buf = comm.send_buffer( proc );
        buf.pack<unsigned>( entity.entity_rank() );
        pack_entity_info( buf , entity );
        pack_field_values( buf , entity );
      }
    }

    comm.allocate_buffers( p_size / 4 );

    // Packing pass: identical traversal, now into allocated buffers,
    // also recording the new ghost in the comm map and comm list.
    for ( std::set< EntityProc , EntityLess >::iterator
          j = new_send.begin(); j != new_send.end() ; ++j ) {

      Entity & entity = * j->first ;
      const unsigned int proc = j->second ;

      if ( ! in_ghost( ghosts , entity , proc ) ) {
        // Not already being sent , must send it.
        CommBuffer & buf = comm.send_buffer( proc );
        buf.pack<unsigned>( entity.entity_rank() );
        pack_entity_info( buf , entity );
        pack_field_values( buf , entity );

        m_entity_comm_map.insert(entity.key(), EntityCommInfo(ghosts.ordinal(), proc));

        m_entity_comm.push_back( & entity );
      }
    }

    comm.communicate();

    std::ostringstream error_msg ;
    int error_count = 0 ;

    // Unpack in entity-rank order: for each rank, drain from every
    // processor only the messages of that rank.
    for ( unsigned rank = 0 ; rank < rank_count ; ++rank ) {
      for ( unsigned p = 0 ; p < p_size ; ++p ) {
        CommBuffer & buf = comm.recv_buffer(p);
        while ( buf.remaining() ) {
          // Only unpack if of the current entity rank.
          // If not the current entity rank, break the iteration
          // until a subsequent entity rank iteration.
          {
            unsigned this_rank = ~0u ; // sentinel; overwritten by peek
            buf.peek<unsigned>( this_rank );

            if ( this_rank != rank ) break ;

            buf.unpack<unsigned>( this_rank );
          }

          PartVector parts ;
          std::vector<Relation> relations ;
          EntityKey key ;
          unsigned owner = ~0u ;

          unpack_entity_info( buf, *this, key, owner, parts, relations );

          // Must not have the locally_owned_part or globally_shared_part

          remove( parts , meta.locally_owned_part() );
          remove( parts , meta.globally_shared_part() );

          std::pair<Entity*,bool> result =
            m_entity_repo.internal_create_entity( key );

          Entity* entity = result.first;
          const bool created = result.second ;
          const bool recreated = EntityLogDeleted == entity->log_query();

          if ( created || recreated ) {
            m_entity_repo.log_created_parallel_copy( *(entity) );
            m_entity_repo.set_entity_owner_rank( *(entity), owner);
          }

          require_entity_owner( * entity , owner );

          internal_change_entity_parts( * entity , parts , PartVector() );

          declare_relation( * entity , relations );

          // Field-value unpack failures are collected, not thrown,
          // so every processor reaches the error reduction below.
          if ( ! unpack_field_values( buf , * entity , error_msg ) ) {
            ++error_count ;
          }

          const EntityCommInfo tmp( ghosts.ordinal() , owner );

          if ( m_entity_comm_map.insert(entity->key(),tmp) ) {
            m_entity_comm.push_back( entity );
          }
        }
      }
    }

    if (parallel_size() > 1) {
      all_reduce( m_parallel_machine , ReduceSum<1>( & error_count ) );
    }

    ThrowErrorMsgIf( error_count, error_msg.str() );

    if ( entity_comm_size < m_entity_comm.size() ) {
      // Added new ghosting entities to the list,
      // must now sort and merge.

      std::vector<Entity*>::iterator i = m_entity_comm.begin();
      i += entity_comm_size ;
      std::sort( i , m_entity_comm.end() , EntityLess() );
      std::inplace_merge( m_entity_comm.begin() , i ,
                          m_entity_comm.end() , EntityLess() );
      m_entity_comm.erase( std::unique( m_entity_comm.begin() , m_entity_comm.end() ) ,
                           m_entity_comm.end() );
    }
  }

  ghosts.m_sync_count = m_sync_count ;
}
487 
488 //----------------------------------------------------------------------
489 
490 namespace {
491 
492 void insert_transitive_closure( std::set<EntityProc,EntityLess> & new_send ,
493  const EntityProc & entry )
494 {
495  // Do not insert if I can determine that this entity is already
496  // owned or shared by the receiving processor.
497 
498  if ( entry.second != entry.first->owner_rank() &&
499  ! in_shared( * entry.first , entry.second ) ) {
500 
501  std::pair< std::set<EntityProc,EntityLess>::iterator , bool >
502  result = new_send.insert( entry );
503 
504  if ( result.second ) {
505  // A new insertion, must also insert the closure
506 
507  const unsigned etype = entry.first->entity_rank();
508  PairIterRelation irel = entry.first->relations();
509 
510  for ( ; ! irel.empty() ; ++irel ) {
511  if ( irel->entity_rank() < etype ) {
512  EntityProc tmp( irel->entity() , entry.second );
513  insert_transitive_closure( new_send , tmp );
514  }
515  }
516  }
517  }
518 }
519 
520 // Fill a new send list from the receive list.
521 
522 void comm_recv_to_send(
523  BulkData & mesh ,
524  const std::set< Entity * , EntityLess > & new_recv ,
525  std::set< EntityProc , EntityLess > & new_send )
526 {
527  const unsigned parallel_size = mesh.parallel_size();
528 
529  CommAll all( mesh.parallel() );
530 
531  for ( int phase = 0; phase < 2; ++phase) {
532  for ( std::set< Entity * , EntityLess >::const_iterator
533  i = new_recv.begin() ; i != new_recv.end() ; ++i ) {
534  const unsigned owner = (*i)->owner_rank();
535  const EntityKey key = (*i)->key();
536  all.send_buffer( owner ).pack<EntityKey>( key );
537  }
538  if (phase == 0) { //allocation phase
539  all.allocate_buffers( parallel_size / 4 , false /* Not symmetric */ );
540  }
541  else { //communication phase
542  all.communicate();
543  }
544  }
545 
546  for ( unsigned proc_rank = 0 ; proc_rank < parallel_size ; ++proc_rank ) {
547  CommBuffer & buf = all.recv_buffer(proc_rank);
548  while ( buf.remaining() ) {
549  EntityKey key ;
550  buf.unpack<EntityKey>( key );
551  EntityProc tmp( mesh.get_entity( key ) , proc_rank );
552  new_send.insert( tmp );
553  }
554  }
555 }
556 
557 // Synchronize the send list to the receive list.
558 
// Synchronize new_send with new_recv across processors. Requests to
// ghost an entity this processor does not own are forwarded to the
// owner and removed from the local send list; ghosts destined for this
// processor that already exist locally are recorded in new_recv.
void comm_sync_send_recv(
  BulkData & mesh ,
  std::set< EntityProc , EntityLess > & new_send ,
  std::set< Entity * , EntityLess > & new_recv )
{
  const unsigned parallel_rank = mesh.parallel_rank();
  const unsigned parallel_size = mesh.parallel_size();

  CommAll all( mesh.parallel() );

  // Communication sizing:
  // Each entry sends one (key,proc) pair to the receiving processor
  // and, when this processor is not the owner, a second pair to the
  // owner (a forwarded ghosting request).
  for ( std::set< EntityProc , EntityLess >::iterator
        i = new_send.begin() ; i != new_send.end() ; ++i ) {
    const unsigned owner = i->first->owner_rank();
    all.send_buffer( i->second ).skip<EntityKey>(2);
    if ( owner != parallel_rank ) {
      all.send_buffer( owner ).skip<EntityKey>(2);
    }
  }

  all.allocate_buffers( parallel_size / 4 , false /* Not symmetric */ );

  // Communication packing (with message content comments):
  // NOTE: iteration and erasure are interleaved, so the increment is
  // done inside the loop body, never in the for-statement.
  for ( std::set< EntityProc , EntityLess >::iterator
        i = new_send.begin() ; i != new_send.end() ; ) {
    const unsigned owner = i->first->owner_rank();

    // Inform receiver of ghosting, the receiver does not own
    // and does not share this entity.
    // The ghost either already exists or is a to-be-done new ghost.
    // This status will be resolved on the final communication pass
    // when new ghosts are packed and sent.

    const EntityKey &entity_key = i->first->key();
    const uint64_t &proc = i->second;

    all.send_buffer( i->second ).pack(entity_key).pack(proc);

    if ( owner != parallel_rank ) {
      // I am not the owner of this entity.
      // Inform the owner of this ghosting need.
      all.send_buffer( owner ).pack(entity_key).pack(proc);

      // Erase it from my processor's ghosting responsibility:
      // The iterator passed to the erase method will be invalidated.
      std::set< EntityProc , EntityLess >::iterator jrem = i ; ++i ;
      new_send.erase( jrem );
    }
    else {
      ++i ;
    }
  }

  all.communicate();

  // Communication unpacking:
  for ( unsigned p = 0 ; p < parallel_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer(p);
    while ( buf.remaining() ) {

      EntityKey entity_key;
      uint64_t proc(0);

      buf.unpack(entity_key).unpack(proc);

      Entity * const e = mesh.get_entity( entity_key );

      if ( parallel_rank != proc ) {
        // Receiving a ghosting need for an entity I own.
        // Add it to my send list.
        // The entity must exist locally since I am its owner.
        ThrowRequireMsg( e != NULL,
            "Unknown entity key: " <<
            MetaData::get(mesh).entity_rank_name(entity_key.rank()) <<
            "[" << entity_key.id() << "]");
        EntityProc tmp( e , proc );
        new_send.insert( tmp );
      }
      else if ( e != NULL ) {
        // I am the receiver for this ghost.
        // If I already have it add it to the receive list,
        // otherwise don't worry about it - I will receive
        // it in the final new-ghosting communication.
        new_recv.insert( e );
      }
    }
  }
}
647 
648 void insert_upward_relations(Entity& rel_entity,
649  const EntityRank rank_of_orig_entity,
650  const unsigned my_rank,
651  const unsigned share_proc,
652  std::vector<EntityProc>& send)
653 {
654  // If related entity is higher rank, I own it, and it is not
655  // already shared by proc, ghost it to the sharing processor.
656  if ( rank_of_orig_entity < rel_entity.entity_rank() &&
657  rel_entity.owner_rank() == my_rank &&
658  ! in_shared( rel_entity , share_proc ) ) {
659 
660  EntityProc entry( &rel_entity , share_proc );
661  send.push_back( entry );
662 
663  // There may be even higher-ranking entities that need to be ghosted, so we must recurse
664  for ( PairIterRelation rel = rel_entity.relations() ; ! rel.empty() ; ++rel ) {
665  Entity * const rel_of_rel_entity = rel->entity();
666  insert_upward_relations(*rel_of_rel_entity, rel_entity.entity_rank(), my_rank, share_proc, send);
667  }
668  }
669 }
670 
671 } // namespace <>
672 
673 //----------------------------------------------------------------------
674 //----------------------------------------------------------------------
675 
676 void BulkData::internal_regenerate_shared_aura()
677 {
678  Trace_("stk_classic::mesh::BulkData::internal_regenerate_shared_aura");
679 
680  require_ok_to_modify();
681 
682  std::vector<EntityProc> send ;
683 
684  // Iterate over all entities with communication info, get the sharing
685  // comm info for each entity, and ensure that upwardly related
686  // entities to the shared entity are ghosted on the sharing proc.
687  for ( std::vector<Entity*>::const_iterator
688  i = entity_comm().begin() ; i != entity_comm().end() ; ++i ) {
689 
690  Entity & entity = **i ;
691 
692  const unsigned erank = entity.entity_rank();
693 
694  const PairIterEntityComm sharing = entity.sharing();
695 
696  for ( size_t j = 0 ; j < sharing.size() ; ++j ) {
697 
698  const unsigned share_proc = sharing[j].proc ;
699 
700  for ( PairIterRelation rel = entity.relations() ; ! rel.empty() ; ++rel ) {
701 
702  Entity * const rel_entity = rel->entity();
703 
704  insert_upward_relations(*rel_entity, erank, m_parallel_rank, share_proc, send);
705  }
706  }
707  }
708 
709  // Add new aura, remove all of the old aura.
710  // The change_ghosting figures out what to actually delete and add.
711  internal_change_ghosting( shared_aura() , send , m_entity_comm );
712 }
713 
714 //----------------------------------------------------------------------
715 //----------------------------------------------------------------------
716 
717 } // namespace mesh
718 } // namespace stk_classic
void declare_relation(Entity &e_from, Entity &e_to, const RelationIdentifier local_id)
Declare a relation and its converse between entities in the same mesh.
The manager of an integrated collection of parts and fields.
Definition: MetaData.hpp:56
Ghosting & create_ghosting(const std::string &name)
Asymmetric parallel relations for owner-to-ghosted mesh entities.
const std::vector< Entity * > & entity_comm() const
All entities with communication information.
Definition: BulkData.hpp:367
Data for ghosting mesh entities.
Definition: Ghosting.hpp:28
void remove(PartVector &v, Part &part)
Remove a part from a properly ordered collection of parts.
Definition: Part.cpp:98
Part & globally_shared_part() const
Subset for the problem domain that is shared with another process. Ghost entities are not members of ...
Definition: MetaData.hpp:98
void change_ghosting(Ghosting &ghosts, const std::vector< EntityProc > &add_send, const std::vector< Entity *> &remove_receive)
Change the members of a ghosting list on the sending processor.
Ghosting & shared_aura() const
Query the shared-entity aura. Is likely to be stale if ownership or sharing has changed and the 'modi...
Definition: BulkData.hpp:375
std::pair< Entity *, unsigned > EntityProc
Pairing of an entity with a processor rank.
Definition: Types.hpp:111
int parallel_rank()
function parallel_rank returns the rank of this processor in the current mpi communicator.
Definition: Env.cpp:318
void destroy_all_ghosting()
Empty every single Ghosting. Same result, but more efficient than, calling change_ghosting to remove ...
ParallelMachine parallel() const
The parallel machine.
Definition: BulkData.hpp:79
Part & locally_owned_part() const
Subset for the problem domain that is owned by the local process. Ghost entities are not members of t...
Definition: MetaData.hpp:93
unsigned parallel_size() const
Size of the parallel machine.
Definition: BulkData.hpp:82
Manager for an integrated collection of entities, entity relations, and buckets of field data...
Definition: BulkData.hpp:49
A fundamental unit within the discretization of a problem domain, including but not limited to nodes...
Definition: Entity.hpp:120
Sierra Toolkit.
const std::string & name() const
Text name for printing purposes only.
Definition: Ghosting.hpp:32
unsigned ordinal() const
Ordinal to identify the ghosting subset.
Definition: Ghosting.hpp:35
int parallel_size()
function parallel_size returns the number of processors in the current mpi communicator.
Definition: Env.cpp:314
unsigned parallel_rank() const
Rank of the parallel machine&#39;s local processor.
Definition: BulkData.hpp:85
std::vector< Part *> PartVector
Collections of parts are frequently maintained as a vector of Part pointers.
Definition: Types.hpp:31
PairIter< std::vector< EntityCommInfo >::const_iterator > PairIterEntityComm
Span of ( communication-subset-ordinal , process-rank ) pairs for the communication of an entity...
Definition: Types.hpp:128
bool destroy_entity(Entity *&entity)
Request the destruction an entity on the local process.
Definition: BulkData.cpp:698