FieldParallel.cpp
/*------------------------------------------------------------------------*/
/*                 Copyright 2010 Sandia Corporation.                      */
/*  Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive    */
/*  license for use of this work by or on behalf of the U.S. Government.   */
/*  Export of this program may require a license from the                  */
/*  United States Government.                                              */
/*------------------------------------------------------------------------*/

#include <stdexcept>
#include <sstream>
#include <algorithm>

#include <stk_util/parallel/ParallelComm.hpp>
#include <stk_util/parallel/ParallelReduce.hpp>

#include <stk_mesh/base/BulkData.hpp>
#include <stk_mesh/base/FieldData.hpp>
#include <stk_mesh/base/FieldParallel.hpp>

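// These routines exchange raw field-data bytes for mesh entities that exist
// on more than one processor:
//   * communicate_field_data( ghosts , fields )
//       owner-to-ghost copies for one Ghosting
//   * communicate_field_data( machine , domain , range , fields )
//       explicit send (domain) / receive (range) EntityProc lists
//   * communicate_field_data( mesh , field_count , fields , sparse )
//       pack and communicate shared (ghost_id == 0) entities; unpacking is
//       left to the caller
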
namespace stk_classic {
namespace mesh {

void communicate_field_data(
  const Ghosting                        & ghosts ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }

  const BulkData & mesh = BulkData::get(ghosts);
  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
        std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    const bool owned = e.owner_rank() == parallel_rank ;

    unsigned e_size = 0 ;
    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;
      e_size += field_data_size( f , e );
    }

    for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {
      if ( ghosts.ordinal() == ec->ghost_id ) {
        if ( owned ) {
          send_size[ ec->proc ] += e_size ;
        }
        else {
          recv_size[ ec->proc ] += e_size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, r_size);
  }

  // Send packing:

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() == parallel_rank ) {

      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));

          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {

            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.send_buffer( ec->proc );
              b.pack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv:

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() != parallel_rank ) {

      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));

          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {

            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.recv_buffer( ec->proc );
              b.unpack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }
}

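// Usage sketch for the Ghosting overload: refreshing the ghosted copies of a
// field over the aura, assuming the aura Ghosting is available from the
// BulkData as mesh.shared_aura() and that `my_field` is a registered field
// (both names here are illustrative):
//
//   std::vector< const FieldBase * > fields ;
//   fields.push_back( & my_field );
//   communicate_field_data( mesh.shared_aura() , fields );
//
// Owned entities pack their bytes for each ghost copy; non-owned (ghosted)
// entities unpack the owner's bytes in the same entity_comm() traversal order.
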
// Heterogeneity?

void communicate_field_data(
  ParallelMachine machine,
  const std::vector<EntityProc> & domain ,
  const std::vector<EntityProc> & range ,
  const std::vector<const FieldBase *> & fields)
{
  if ( fields.empty() ) { return; }

  const unsigned parallel_size = parallel_machine_size( machine );
  const unsigned parallel_rank = parallel_machine_rank( machine );
  const bool     asymmetric    = & domain != & range ;

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
        std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  std::vector<EntityProc>::const_iterator i ;

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      send_size[ p ] += e_size ;
    }
  }

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || p == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      recv_size[ p ] += e_size ;
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    sparse.allocate_buffers( machine, parallel_size / 4 , s_size, r_size);
  }

  // Pack for send:

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      CommBuffer & b = sparse.send_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv:

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || p == e.owner_rank() ) {
      CommBuffer & b = sparse.recv_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.unpack<unsigned char>( ptr , size );
        }
      }
    }
  }
}

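// Usage sketch for the domain/range overload: the two EntityProc lists must be
// matched across processors, so that every ( entity , proc ) send pair in
// `domain` has a corresponding receive pair in `range` on processor `proc`
// (list names below are illustrative):
//
//   communicate_field_data( mesh.parallel() , send_list , recv_list , fields );
//
// Passing the same vector as both domain and range makes `asymmetric` false,
// so only locally owned entities are packed and data is accepted only from an
// entity's owning processor, i.e. owner-to-non-owner copies.
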
//----------------------------------------------------------------------

void communicate_field_data(
  const BulkData & mesh ,
  const unsigned field_count ,
  const FieldBase * fields[] ,
  CommAll & sparse )
{
  const std::vector<Entity*> & entity_comm = mesh.entity_comm();

  const unsigned parallel_size = mesh.parallel_size();

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> msg_size( parallel_size , zero );

  size_t j = 0;

  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        for ( PairIterEntityComm
              ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          msg_size[ ec->proc ] += size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:

  {
    const unsigned * const s_size = & msg_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, s_size);
  }

  // Pack for send:

  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        unsigned char * ptr =
          reinterpret_cast<unsigned char *>(field_data( f , e ));
        for ( PairIterEntityComm
              ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          CommBuffer & b = sparse.send_buffer( ec->proc );
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();
}

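// Usage sketch for the shared-entity overload: it packs and communicates data
// for sharing (ghost_id == 0) comm entries only, in both directions, and
// leaves the unpack step to the caller, e.g. to accumulate contributions from
// all sharing processors rather than overwrite local values (hypothetical
// caller code outlined below):
//
//   CommAll sparse ;
//   communicate_field_data( mesh , 1 , fields , sparse );
//   // ... for each shared entity and each sharing processor p, unpack from
//   //     sparse.recv_buffer( p ) and combine into the local field data ...
//   communicate_field_data_verify_read( sparse );
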
void communicate_field_data_verify_read( CommAll & sparse )
{
  std::ostringstream msg ;
  int error = 0 ;
  for ( unsigned p = 0 ; p < sparse.parallel_size() ; ++p ) {
    if ( sparse.recv_buffer( p ).remaining() ) {
      msg << "P" << sparse.parallel_rank()
          << " Unread data from P" << p << std::endl ;
      error = 1 ;
    }
  }
  all_reduce( sparse.parallel() , ReduceSum<1>( & error ) );
  ThrowErrorMsgIf( error, msg.str() );
}

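// communicate_field_data_verify_read is a collective consistency check: each
// processor scans its receive buffers for unread bytes, the error flags are
// summed across all processors, and every processor throws together if any
// buffer was left partially unread.
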
//----------------------------------------------------------------------

} // namespace mesh
} // namespace stk_classic

Referenced declarations:

  unsigned field_data_size( const FieldBase & f , const Bucket & k )
      Size, in bytes, of the field data for each entity.  (FieldData.hpp:99)

  FieldTraits< field_type >::data_type * field_data( const field_type & f , const Bucket::iterator i )
      Pointer to the field data array.  (FieldData.hpp:116)

  unsigned parallel_machine_rank( ParallelMachine parallel_machine )
      Rank of this processor in the given communicator.  (Parallel.cpp:29)

  unsigned parallel_machine_size( ParallelMachine parallel_machine )
      Number of processors in the given communicator.  (Parallel.cpp:18)

  typedef MPI_Comm ParallelMachine  (Parallel.hpp:32)

  int parallel_rank() / int parallel_size()
      Rank of this processor / number of processors in the current MPI communicator.  (Env.cpp:318, 314)

  typedef PairIter< std::vector< EntityCommInfo >::const_iterator > PairIterEntityComm
      Span of ( communication-subset-ordinal , process-rank ) pairs for the communication of an entity.  (Types.hpp:128)