xdr_io.C
// The libMesh Finite Element Library.
// Copyright (C) 2002-2014 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA



// C++ includes
#include <algorithm> // for std::sort, std::min, std::equal_range, std::unique (used below)
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <vector>
#include <string>

// Local includes
#include "libmesh/xdr_io.h"
#include "libmesh/legacy_xdr_io.h"
#include "libmesh/xdr_cxx.h"
#include "libmesh/enum_xdr_mode.h"
#include "libmesh/mesh_base.h"
#include "libmesh/node.h"
#include "libmesh/elem.h"
#include "libmesh/boundary_info.h"
#include "libmesh/parallel.h"
#include "libmesh/mesh_tools.h"
#include "libmesh/partitioner.h"
#include "libmesh/libmesh_logging.h" // assumed: provides the START_LOG/STOP_LOG macros used below

namespace libMesh
{

//-----------------------------------------------
// anonymous namespace for implementation details
namespace {

struct DofBCData
{
  unsigned int dof_id;
  unsigned short int side;
  boundary_id_type bc_id;

  // Default constructor
  DofBCData (unsigned int dof_id_in=0,
             unsigned short int side_in=0,
             boundary_id_type bc_id_in=0) :
    dof_id(dof_id_in),
    side(side_in),
    bc_id(bc_id_in)
  {}

  // comparison operator
  bool operator < (const DofBCData &other) const
  {
    if (this->dof_id == other.dof_id)
      return (this->side < other.side);

    return this->dof_id < other.dof_id;
  }
};

// comparison operators
bool operator < (const unsigned int &other_dof_id,
                 const DofBCData &dof_bc)
{
  return other_dof_id < dof_bc.dof_id;
}

bool operator < (const DofBCData &dof_bc,
                 const unsigned int &other_dof_id)
{
  return dof_bc.dof_id < other_dof_id;
}


// For some reason SunStudio does not seem to accept the above
// comparison functions for use in
// std::equal_range (std::vector<DofBCData>::iterator, std::vector<DofBCData>::iterator, unsigned int);
#if defined(__SUNPRO_CC) || defined(__PGI)
struct CompareIntDofBCData
{
  bool operator()(const unsigned int &other_dof_id,
                  const DofBCData &dof_bc)
  {
    return other_dof_id < dof_bc.dof_id;
  }

  bool operator()(const DofBCData &dof_bc,
                  const unsigned int &other_dof_id)
  {
    return dof_bc.dof_id < other_dof_id;
  }
};
#endif
} // anonymous namespace
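
// Usage sketch for the comparison helpers above (illustrative only; "bcs"
// and "eid" are hypothetical names).  Given a std::vector<DofBCData> "bcs"
// sorted by dof_id and an element id "eid", the asymmetric operator<
// overloads let std::equal_range locate every record for one element
// without constructing a DofBCData search key:
//
//   std::pair<std::vector<DofBCData>::iterator,
//             std::vector<DofBCData>::iterator> range =
//     std::equal_range (bcs.begin(), bcs.end(), eid);
//   for (; range.first != range.second; ++range.first)
//     { /* use range.first->side and range.first->bc_id */ }
//
// read_serialized_bcs() below does exactly this over the level-0 elements.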



// ------------------------------------------------------------
// XdrIO static data
const std::size_t XdrIO::io_blksize = 128000;
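
// A note on blocking (a sketch of the arithmetic, not authoritative): the
// serialized writers and readers below stream nodes, elements, and boundary
// conditions in chunks of io_blksize objects so that processor 0 never has
// to buffer the whole mesh at once.  For example, 1,000,000 nodes with
// io_blksize = 128000 are handled in ceil(1000000/128000) = 8 blocks.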



// ------------------------------------------------------------
// XdrIO members
XdrIO::XdrIO (MeshBase& mesh, const bool binary_in) :
  MeshInput<MeshBase> (mesh, /* is_parallel_format = */ true),
  MeshOutput<MeshBase>(mesh, /* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in),
  _legacy             (false),
  _write_serial       (false),
  _write_parallel     (false),
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _write_unique_id    (true),
#else
  _write_unique_id    (false),
#endif
  _field_width        (4), // In 0.7.0 all fields are 4 bytes; in 0.9.2+ they can vary
  _version            ("libMesh-0.9.2+"),
  _bc_file_name       ("n/a"),
  _partition_map_file ("n/a"),
  _subdomain_map_file ("n/a"),
  _p_level_file       ("n/a")
{
}



XdrIO::XdrIO (const MeshBase& mesh, const bool binary_in) :
  MeshOutput<MeshBase>(mesh, /* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in)
{
}



XdrIO::~XdrIO ()
{
}
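
// Minimal usage sketch for this class (illustrative only, not part of the
// library source; assumes 0.9.x-era libMesh APIs, a LibMeshInit object
// named "init", and a build with XDR support):
//
//   Mesh mesh(init.comm());
//   MeshTools::Generation::build_square (mesh, 4, 4);
//   XdrIO(mesh, /* binary = */ true).write("out.xdr");
//
//   Mesh mesh2(init.comm());
//   XdrIO(mesh2, /* binary = */ true).read("out.xdr");
//   mesh2.prepare_for_use();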



void XdrIO::write (const std::string& name)
{
  if (this->legacy())
    {
      libmesh_deprecated();

      // We don't support writing parallel files in the legacy format.
      libmesh_assert (!this->write_parallel());

      LegacyXdrIO(MeshOutput<MeshBase>::mesh(), this->binary()).write(name);
      return;
    }

  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);

  START_LOG("write()","XdrIO");

  // convenient reference to our mesh
  const MeshBase &mesh = MeshOutput<MeshBase>::mesh();

  header_id_type
    n_elem  = mesh.n_elem(),
    n_nodes = mesh.n_nodes();

  libmesh_assert(n_elem  == mesh.n_elem());
  libmesh_assert(n_nodes == mesh.n_nodes());

  std::size_t
    n_bcs = mesh.boundary_info->n_boundary_conds();
  std::size_t
    n_nodesets = mesh.boundary_info->n_nodeset_conds();
  unsigned int
    n_p_levels = MeshTools::n_p_levels (mesh);

  bool write_parallel_files = this->write_parallel();

  //-------------------------------------------------------------
  // For all the optional files -- the default file name is "n/a".
  // However, the user may specify an optional external file.

  // If there are BCs and the user has not already provided a
  // file name then write to "."
  if ((n_bcs || n_nodesets) &&
      this->boundary_condition_file_name() == "n/a")
    this->boundary_condition_file_name() = ".";

  // If there is at least one subdomain and the user has not specified an
  // external file then write the subdomain mapping to the default file "."
  if ((mesh.n_subdomains() > 0) &&
      (this->subdomain_map_file_name() == "n/a"))
    this->subdomain_map_file_name() = ".";

  // In general we don't write the partition information.

  // If we have p levels and the user has not already provided
  // a file name then write to "."
  if ((n_p_levels > 1) &&
      (this->polynomial_level_file_name() == "n/a"))
    this->polynomial_level_file_name() = ".";

  // write the header
  if (this->processor_id() == 0)
    {
      std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
      io.data (full_ver);

      io.data (n_elem,  "# number of elements");
      io.data (n_nodes, "# number of nodes");

      io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
      io.data (this->subdomain_map_file_name(),      "# subdomain id specification file");
      io.data (this->partition_map_file_name(),      "# processor id specification file");
      io.data (this->polynomial_level_file_name(),   "# p-level specification file");

      // Version 0.9.2+ introduces sizes for each type
      header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;

      const bool
        write_p_level      = ("." == this->polynomial_level_file_name()),
        write_partitioning = ("." == this->partition_map_file_name()),
        write_subdomain_id = ("." == this->subdomain_map_file_name()),
        write_bcs          = ("." == this->boundary_condition_file_name());

      io.data (write_size, "# type size");
      io.data (_write_unique_id ? write_size : zero_size, "# uid size");
      io.data (write_partitioning ? write_size : zero_size, "# pid size");
      io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
      io.data (write_p_level ? write_size : zero_size, "# p-level size");
      // Boundary Condition sizes
      io.data (write_bcs ? write_size : zero_size, "# eid size");   // elem id
      io.data (write_bcs ? write_size : zero_size, "# side size "); // side number
      io.data (write_bcs ? write_size : zero_size, "# bid size");   // boundary id
    }
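
  // Layout of the 0.9.2+ header written above (a sketch, restating the
  // io.data() calls just above):
  //   "libMesh-0.9.2+ [parallel]"
  //   n_elem, n_nodes
  //   bc file, sid file, pid file, p-level file   ("n/a" or ".")
  //   type / uid / pid / sid / p-level sizes,
  //   eid / side / bid sizes                      (each 0 or sizeof(xdr_id_type))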

  if (write_parallel_files)
    {
      // Parallel xdr mesh files aren't implemented yet; until they
      // are we'll just warn the user and write a serial file.
      libMesh::out << "Warning! Parallel xda/xdr is not yet implemented.\n";
      libMesh::out << "Writing a serialized file instead." << std::endl;

      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, n_elem);

      // write the nodal locations
      this->write_serialized_nodes (io, n_nodes);

      // write the boundary condition information
      this->write_serialized_bcs (io, n_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);
    }
  else
    {
      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, n_elem);

      // write the nodal locations
      this->write_serialized_nodes (io, n_nodes);

      // write the boundary condition information
      this->write_serialized_bcs (io, n_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);
    }

  if (mesh.boundary_info->n_edge_conds() > 0)
    {
      libMesh::out << "Warning: Mesh contains edge boundary IDs, but these "
                   << "are not supported by the XDR format."
                   << std::endl;
    }

  STOP_LOG("write()","XdrIO");

  // pause all processes until the writing ends -- this will
  // protect for the pathological case where a write is
  // followed immediately by a read.  The write must be
  // guaranteed to complete first.
  io.close();
  this->comm().barrier();
}



void XdrIO::write_serialized_subdomain_names (Xdr &io) const
{
  if (this->processor_id() == 0)
    {
      const MeshBase &mesh = MeshOutput<MeshBase>::mesh();

      const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();

      std::vector<header_id_type> subdomain_ids;   subdomain_ids.reserve(subdomain_map.size());
      std::vector<std::string>    subdomain_names; subdomain_names.reserve(subdomain_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in mesh_base, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      header_id_type n_subdomain_names = 0;
      std::map<subdomain_id_type, std::string>::const_iterator it_end = subdomain_map.end();
      for (std::map<subdomain_id_type, std::string>::const_iterator it = subdomain_map.begin(); it != it_end; ++it)
        {
          if (!it->second.empty())
            {
              n_subdomain_names++;
              subdomain_ids.push_back(it->first);
              subdomain_names.push_back(it->second);
            }
        }

      io.data(n_subdomain_names, "# subdomain id to name map");
      // Write out the ids and names in two vectors
      if (n_subdomain_names)
        {
          io.data(subdomain_ids);
          io.data(subdomain_names);
        }
    }
}



void XdrIO::write_serialized_connectivity (Xdr &io, const dof_id_type libmesh_dbg_var(n_elem)) const
{
  libmesh_assert (io.writing());

  const bool
    write_p_level      = ("." == this->polynomial_level_file_name()),
    write_partitioning = ("." == this->partition_map_file_name()),
    write_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  const MeshBase &mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (n_elem, mesh.n_elem());

  // We will only write active elements and their parents.
  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
  std::vector<xdr_id_type>
    n_global_elem_at_level(n_active_levels), n_local_elem_at_level(n_active_levels);

  MeshBase::const_element_iterator it, end;

  // Find the number of local and global elements at each level
#ifndef NDEBUG
  dof_id_type tot_n_elem = 0;
#endif
  for (unsigned int level=0; level<n_active_levels; level++)
    {
      it  = mesh.local_level_elements_begin(level);
      end = mesh.local_level_elements_end(level);

      n_local_elem_at_level[level] = n_global_elem_at_level[level] = MeshTools::n_elem(it, end);

      this->comm().sum(n_global_elem_at_level[level]);
#ifndef NDEBUG
      tot_n_elem += n_global_elem_at_level[level];
#endif
      libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
      libmesh_assert_less_equal (tot_n_elem, n_elem);
    }

  std::vector<xdr_id_type>
    xfer_conn, recv_conn;
  std::vector<dof_id_type>
    n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
  std::vector<xdr_id_type> output_buffer;
  std::vector<std::size_t>
    xfer_buf_sizes(this->n_processors());

  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type> > id_map_type;
  id_map_type parent_id_map, child_id_map;

  dof_id_type my_next_elem=0, next_global_elem=0;

  //-------------------------------------------
  // First write the level-0 elements directly.
  it  = mesh.local_level_elements_begin(0);
  end = mesh.local_level_elements_end(0);
  for (; it != end; ++it)
    {
      pack_element (xfer_conn, *it);
      parent_id_map[(*it)->id()] = std::make_pair(this->processor_id(),
                                                  my_next_elem++);
    }
  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.

  std::size_t my_size = xfer_conn.size();
  this->comm().gather (0, my_next_elem, n_elem_on_proc);
  this->comm().gather (0, my_size,      xfer_buf_sizes);

  processor_offsets[0] = 0;
  for (unsigned int pid=1; pid<this->n_processors(); pid++)
    processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
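  // Worked example (illustrative): with n_elem_on_proc = {10, 7, 5}, the
  // loop above yields processor_offsets = {0, 10, 17}, so element k written
  // by processor p receives global output index processor_offsets[p] + k.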

  // All processors send their xfer buffers to processor 0.
  // Processor 0 will receive the data and write out the elements.
  if (this->processor_id() == 0)
    {
      // Write the number of elements at this level.
      {
        std::string comment = "# n_elem at level 0", legend = ", [ type ";
        if (_write_unique_id)
          legend += "uid ";
        if (write_partitioning)
          legend += "pid ";
        if (write_subdomain_id)
          legend += "sid ";
        if (write_p_level)
          legend += "p_level ";
        legend += "(n0 ... nN-1) ]";
        comment += legend;
        io.data (n_global_elem_at_level[0], comment.c_str());
      }

      for (unsigned int pid=0; pid<this->n_processors(); pid++)
        {
          recv_conn.resize(xfer_buf_sizes[pid]);
          if (pid == 0)
            recv_conn = xfer_conn;
          else
            this->comm().receive (pid, recv_conn);

          // at a minimum, the buffer should contain the number of elements,
          // which could be 0.
          libmesh_assert (!recv_conn.empty());

          {
            const xdr_id_type n_elem_received = recv_conn.back();
            std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();

            for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
              {
                output_buffer.clear();
                const xdr_id_type n_nodes = *recv_conn_iter; ++recv_conn_iter;
                output_buffer.push_back(*recv_conn_iter); /* type */ ++recv_conn_iter;

                // pack_element() always puts the uid/pid/sid/p-level fields
                // in the buffer, so the iterator advances unconditionally;
                // only the copy into output_buffer is optional.
                if (_write_unique_id)
                  output_buffer.push_back(*recv_conn_iter); /* unique_id */
                ++recv_conn_iter;

                if (write_partitioning)
                  output_buffer.push_back(*recv_conn_iter); /* processor id */
                ++recv_conn_iter;

                if (write_subdomain_id)
                  output_buffer.push_back(*recv_conn_iter); /* subdomain id */
                ++recv_conn_iter;

#ifdef LIBMESH_ENABLE_AMR
                if (write_p_level)
                  output_buffer.push_back(*recv_conn_iter); /* p level */
                ++recv_conn_iter;
#endif
                for (dof_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
                  output_buffer.push_back(*recv_conn_iter);

                io.data_stream (&output_buffer[0], output_buffer.size(), output_buffer.size());
              }
          }
        }
    }
  else
    this->comm().send (0, xfer_conn);

#ifdef LIBMESH_ENABLE_AMR
  //--------------------------------------------------------------------
  // Next write the remaining elements indirectly through their parents.
  // This will ensure that the children are written in the proper order
  // so they can be reconstructed properly.
  for (unsigned int level=1; level<n_active_levels; level++)
    {
      xfer_conn.clear();

      it  = mesh.local_level_elements_begin(level-1);
      end = mesh.local_level_elements_end  (level-1);

      dof_id_type my_n_elem_written_at_level = 0;
      for (; it != end; ++it)
        if (!(*it)->active()) // we only want the parent elements at this level, and
          {                   // there is no direct iterator for this obscure use
            const Elem *parent = *it;
            id_map_type::iterator pos = parent_id_map.find(parent->id());
            libmesh_assert (pos != parent_id_map.end());
            const processor_id_type parent_pid = pos->second.first;
            const dof_id_type       parent_id  = pos->second.second;
            parent_id_map.erase(pos);

            for (unsigned int c=0; c<parent->n_children(); c++, my_next_elem++)
              {
                const Elem *child = parent->child(c);
                pack_element (xfer_conn, child, parent_id, parent_pid);

                // this approach introduces the possibility that we write
                // non-local elements.  These elements may well be parents
                // at the next step
                child_id_map[child->id()] = std::make_pair (child->processor_id(),
                                                            my_n_elem_written_at_level++);
              }
          }
      xfer_conn.push_back(my_n_elem_written_at_level);
      my_size = xfer_conn.size();
      this->comm().gather (0, my_size, xfer_buf_sizes);

      // Processor 0 will receive the data and write the elements.
      if (this->processor_id() == 0)
        {
          // Write the number of elements at this level.
          {
            char buf[80];
            std::sprintf(buf, "# n_elem at level %u", level);
            std::string comment(buf), legend = ", [ type ";

            if (_write_unique_id)
              legend += "uid ";
            legend += "parent ";
            if (write_partitioning)
              legend += "pid ";
            if (write_subdomain_id)
              legend += "sid ";
            if (write_p_level)
              legend += "p_level ";
            legend += "(n0 ... nN-1) ]";
            comment += legend;
            io.data (n_global_elem_at_level[level], comment.c_str());
          }

          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              recv_conn.resize(xfer_buf_sizes[pid]);
              if (pid == 0)
                recv_conn = xfer_conn;
              else
                this->comm().receive (pid, recv_conn);

              // at a minimum, the buffer should contain the number of elements,
              // which could be 0.
              libmesh_assert (!recv_conn.empty());

              {
                const xdr_id_type n_elem_received = recv_conn.back();
                std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();

                for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
                  {
                    output_buffer.clear();
                    const xdr_id_type n_nodes = *recv_conn_iter; ++recv_conn_iter;
                    output_buffer.push_back(*recv_conn_iter); /* type */ ++recv_conn_iter;

                    // as above, advance unconditionally; copy conditionally
                    if (_write_unique_id)
                      output_buffer.push_back(*recv_conn_iter); /* unique_id */
                    ++recv_conn_iter;

                    const xdr_id_type parent_local_id = *recv_conn_iter; ++recv_conn_iter;
                    const xdr_id_type parent_pid      = *recv_conn_iter; ++recv_conn_iter;

                    output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);

                    if (write_partitioning)
                      output_buffer.push_back(*recv_conn_iter); /* processor id */
                    ++recv_conn_iter;

                    if (write_subdomain_id)
                      output_buffer.push_back(*recv_conn_iter); /* subdomain id */
                    ++recv_conn_iter;

                    if (write_p_level)
                      output_buffer.push_back(*recv_conn_iter); /* p level */
                    ++recv_conn_iter;

                    for (xdr_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
                      output_buffer.push_back(*recv_conn_iter);

                    io.data_stream (&output_buffer[0], output_buffer.size(), output_buffer.size());
                  }
              }
            }
        }
      else
        this->comm().send (0, xfer_conn);

      // update the processor_offsets
      processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
      this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
      for (unsigned int pid=1; pid<this->n_processors(); pid++)
        processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];

      // Now, at the next level we will again iterate over local parents.  However,
      // those parents may have been written by other processors (at this step),
      // so we need to gather them into our *_id_maps.
      {
        std::vector<std::vector<dof_id_type> > requested_ids(this->n_processors());
        std::vector<dof_id_type> request_to_fill;

        it  = mesh.local_level_elements_begin(level);
        end = mesh.local_level_elements_end(level);

        for (; it!=end; ++it)
          if (!child_id_map.count((*it)->id()))
            {
              libmesh_assert_not_equal_to ((*it)->parent()->processor_id(), this->processor_id());
              requested_ids[(*it)->parent()->processor_id()].push_back((*it)->id());
            }

        // Next set the child_ids
        for (unsigned int p=1; p != this->n_processors(); ++p)
          {
            // Trade my requests with processor procup and procdown
            unsigned int procup = (this->processor_id() + p) %
                                   this->n_processors();
            unsigned int procdown = (this->n_processors() +
                                     this->processor_id() - p) %
                                     this->n_processors();
            this->comm().send_receive(procup, requested_ids[procup],
                                      procdown, request_to_fill);

            // Fill those requests by overwriting the requested ids
            for (std::size_t i=0; i<request_to_fill.size(); i++)
              {
                libmesh_assert (child_id_map.count(request_to_fill[i]));
                libmesh_assert_equal_to (child_id_map[request_to_fill[i]].first, procdown);

                request_to_fill[i] = child_id_map[request_to_fill[i]].second;
              }

            // Trade back the results
            std::vector<dof_id_type> filled_request;
            this->comm().send_receive(procdown, request_to_fill,
                                      procup,   filled_request);

            libmesh_assert_equal_to (filled_request.size(), requested_ids[procup].size());

            for (std::size_t i=0; i<filled_request.size(); i++)
              child_id_map[requested_ids[procup][i]] =
                std::make_pair (procup,
                                filled_request[i]);
          }
        // overwrite the parent_id_map with the child_id_map, but
        // use std::map::swap() for efficiency.
        parent_id_map.swap(child_id_map); child_id_map.clear();
      }
    }
#endif // LIBMESH_ENABLE_AMR
  if (this->processor_id() == 0)
    libmesh_assert_equal_to (next_global_elem, n_elem);

}



void XdrIO::write_serialized_nodes (Xdr &io, const dof_id_type n_nodes) const
{
  // convenient reference to our mesh
  const MeshBase &mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (n_nodes, mesh.n_nodes());

  std::vector<dof_id_type> xfer_ids;
  std::vector<Real> xfer_coords, &coords=xfer_coords; // "coords" aliases xfer_coords; reused below as the output buffer

  std::vector<std::vector<dof_id_type> > recv_ids   (this->n_processors());
  std::vector<std::vector<Real> >        recv_coords(this->n_processors());

  std::size_t n_written=0;

  for (std::size_t blk=0, last_node=0; last_node<n_nodes; blk++)
    {
      const std::size_t first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

      // Build up the xfer buffers on each processor
      MeshBase::const_node_iterator
        it  = mesh.local_nodes_begin(),
        end = mesh.local_nodes_end();

      xfer_ids.clear(); xfer_coords.clear();

      for (; it!=end; ++it)
        if (((*it)->id() >= first_node) && // node in [first_node, last_node)
            ((*it)->id() <  last_node))
          {
            xfer_ids.push_back((*it)->id());
            const Point &p = **it;
            xfer_coords.push_back(p(0));
#if LIBMESH_DIM > 1
            xfer_coords.push_back(p(1));
#endif
#if LIBMESH_DIM > 2
            xfer_coords.push_back(p(2));
#endif
          }

      //-------------------------------------
      // Send the xfer buffers to processor 0
      std::vector<std::size_t> ids_size, coords_size;

      const std::size_t my_ids_size = xfer_ids.size();

      // explicitly gather ids_size
      this->comm().gather (0, my_ids_size, ids_size);

      // infer coords_size on processor 0
      if (this->processor_id() == 0)
        {
          coords_size.reserve(this->n_processors());
          for (std::size_t p=0; p<ids_size.size(); p++)
            coords_size.push_back(LIBMESH_DIM*ids_size[p]);
        }

      // We will have lots of simultaneous receives if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::Request>
        id_request_handles(this->n_processors()-1),
        coord_request_handles(this->n_processors()-1);

      Parallel::MessageTag
        id_tag    = mesh.comm().get_unique_tag(1234),
        coord_tag = mesh.comm().get_unique_tag(1235);

      // Post the receives -- do this on processor 0 only.
      if (this->processor_id() == 0)
        {
          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              recv_ids[pid].resize(ids_size[pid]);
              recv_coords[pid].resize(coords_size[pid]);

              if (pid == 0)
                {
                  recv_ids[0]    = xfer_ids;
                  recv_coords[0] = xfer_coords;
                }
              else
                {
                  this->comm().receive (pid, recv_ids[pid],
                                        id_request_handles[pid-1],
                                        id_tag);
                  this->comm().receive (pid, recv_coords[pid],
                                        coord_request_handles[pid-1],
                                        coord_tag);
                }
            }
        }
      else
        {
          // Send -- do this on all other processors.
          this->comm().send(0, xfer_ids,    id_tag);
          this->comm().send(0, xfer_coords, coord_tag);
        }

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (this->processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (coord_request_handles);

          // Write the coordinates in this block.
          std::size_t tot_id_size=0, tot_coord_size=0;
          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              tot_id_size    += recv_ids[pid].size();
              tot_coord_size += recv_coords[pid].size();
            }

          libmesh_assert_less_equal
            (tot_id_size, std::min(io_blksize, std::size_t(n_nodes)));
          libmesh_assert_equal_to (tot_coord_size, LIBMESH_DIM*tot_id_size);

          coords.resize (3*tot_id_size);
          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
              {
                const std::size_t local_idx = recv_ids[pid][idx] - first_node;
                libmesh_assert_less ((3*local_idx+2), coords.size());
                libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());

                coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
#if LIBMESH_DIM > 1
                coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
#else
                coords[3*local_idx+1] = 0.;
#endif
#if LIBMESH_DIM > 2
                coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
#else
                coords[3*local_idx+2] = 0.;
#endif

                n_written++;
              }

          io.data_stream (coords.empty() ? NULL : &coords[0], coords.size(), 3);
        }
    }
  if (this->processor_id() == 0)
    libmesh_assert_equal_to (n_written, n_nodes);
}



void XdrIO::write_serialized_bcs (Xdr &io, const std::size_t n_bcs) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase &mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo &boundary_info = *mesh.boundary_info;

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, true); // sideset names

  header_id_type n_bcs_out = n_bcs;
  if (this->processor_id() == 0)
    io.data (n_bcs_out, "# number of boundary conditions");
  n_bcs_out = 0;

  if (!n_bcs) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Boundary conditions are only specified for level-0 elements
  MeshBase::const_element_iterator
    it  = mesh.local_level_elements_begin(0),
    end = mesh.local_level_elements_end(0);

  dof_id_type n_local_level_0_elem=0;
  for (; it!=end; ++it, n_local_level_0_elem++)
    {
      const Elem *elem = *it;

      for (unsigned int s=0; s<elem->n_sides(); s++)
        // We're supporting boundary ids on internal sides now
        //   if (elem->neighbor(s) == NULL)
        {
          const std::vector<boundary_id_type>& bc_ids =
            boundary_info.boundary_ids (elem, s);
          for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
            {
              const boundary_id_type bc_id = *id_it;
              if (bc_id != BoundaryInfo::invalid_id)
                {
                  xfer_bcs.push_back (n_local_level_0_elem);
                  xfer_bcs.push_back (s);
                  xfer_bcs.push_back (bc_id);
                }
            }
        }
    }

  xfer_bcs.push_back(n_local_level_0_elem);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type elem_offset = 0;
      for (unsigned int pid=0; pid<this->n_processors(); pid++)
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_local_level_0_elem
            = recv_bcs.back(); recv_bcs.pop_back();

          for (std::size_t idx=0; idx<recv_bcs.size(); idx += 3, n_bcs_out++)
            recv_bcs[idx+0] += elem_offset;

          io.data_stream (recv_bcs.empty() ? NULL : &recv_bcs[0], recv_bcs.size(), 3);
          elem_offset += my_n_local_level_0_elem;
        }
      libmesh_assert_equal_to (n_bcs, n_bcs_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}



void XdrIO::write_serialized_nodesets (Xdr &io, const std::size_t n_nodesets) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase &mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo &boundary_info = *mesh.boundary_info;

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, false); // nodeset names

  header_id_type n_nodesets_out = n_nodesets;
  if (this->processor_id() == 0)
    io.data (n_nodesets_out, "# number of nodesets");
  n_nodesets_out = 0;

  if (!n_nodesets) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  MeshBase::const_node_iterator
    it  = mesh.local_nodes_begin(),
    end = mesh.local_nodes_end();

  dof_id_type n_node=0;
  for (; it!=end; ++it)
    {
      const Node *node = *it;
      const std::vector<boundary_id_type>& nodeset_ids =
        boundary_info.boundary_ids (node);
      for (std::vector<boundary_id_type>::const_iterator id_it=nodeset_ids.begin(); id_it!=nodeset_ids.end(); ++id_it)
        {
          const boundary_id_type bc_id = *id_it;
          if (bc_id != BoundaryInfo::invalid_id)
            {
              xfer_bcs.push_back (node->id());
              xfer_bcs.push_back (bc_id);
            }
        }
    }

  xfer_bcs.push_back(n_node);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type node_offset = 0;
      for (unsigned int pid=0; pid<this->n_processors(); pid++)
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_node
            = recv_bcs.back(); recv_bcs.pop_back();

          for (std::size_t idx=0; idx<recv_bcs.size(); idx += 2, n_nodesets_out++)
            recv_bcs[idx+0] += node_offset;

          io.data_stream (recv_bcs.empty() ? NULL : &recv_bcs[0], recv_bcs.size(), 2);
          node_offset += my_n_node;
        }
      libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}



void XdrIO::write_serialized_bc_names (Xdr &io, const BoundaryInfo & info, bool is_sideset) const
{
  if (this->processor_id() == 0)
    {
      const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
        info.get_sideset_name_map() : info.get_nodeset_name_map();

      std::vector<header_id_type> boundary_ids;   boundary_ids.reserve(boundary_map.size());
      std::vector<std::string>    boundary_names; boundary_names.reserve(boundary_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in boundary_info, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      header_id_type n_boundary_names = 0;
      std::map<boundary_id_type, std::string>::const_iterator it_end = boundary_map.end();
      for (std::map<boundary_id_type, std::string>::const_iterator it = boundary_map.begin(); it != it_end; ++it)
        {
          if (!it->second.empty())
            {
              n_boundary_names++;
              boundary_ids.push_back(it->first);
              boundary_names.push_back(it->second);
            }
        }

      if (is_sideset)
        io.data(n_boundary_names, "# sideset id to name map");
      else
        io.data(n_boundary_names, "# nodeset id to name map");
      // Write out the ids and names in two vectors
      if (n_boundary_names)
        {
          io.data(boundary_ids);
          io.data(boundary_names);
        }
    }
}



void XdrIO::read (const std::string& name)
{
  // Only open the file on processor 0 -- this is especially important because
  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
  // calls will produce a race condition.
  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);

  // convenient reference to our mesh
  MeshBase &mesh = MeshInput<MeshBase>::mesh();

  // get the version string.
  if (this->processor_id() == 0)
    io.data (this->version());
  this->comm().broadcast (this->version());

  // note that for "legacy" files the first entry is an
  // integer -- not a string at all.
  this->legacy() = !(this->version().find("libMesh") < this->version().size());

  // Check for a legacy version format.
  if (this->legacy())
    {
      io.close();
      LegacyXdrIO(mesh, this->binary()).read(name);
      return;
    }

  START_LOG("read()","XdrIO");

  std::vector<header_id_type> meta_data(10, sizeof(xdr_id_type));
  // type_size, uid_size, pid_size, sid_size, p_level_size, eid_size, side_size, bid_size;
  header_id_type pos=0;

  const bool is_version_0_9_2 = this->version().find("0.9.2") != std::string::npos ? true : false;

  if (this->processor_id() == 0)
    {
      io.data (meta_data[pos++]);
      io.data (meta_data[pos++]);
      io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file="  << this->boundary_condition_file_name() << std::endl;
      io.data (this->subdomain_map_file_name());      // libMesh::out << "sid_file=" << this->subdomain_map_file_name()      << std::endl;
      io.data (this->partition_map_file_name());      // libMesh::out << "pid_file=" << this->partition_map_file_name()      << std::endl;
      io.data (this->polynomial_level_file_name());   // libMesh::out << "pl_file="  << this->polynomial_level_file_name()   << std::endl;

      if (is_version_0_9_2)
        {
          io.data (meta_data[pos++], "# type size");
          io.data (meta_data[pos++], "# uid size");
          io.data (meta_data[pos++], "# pid size");
          io.data (meta_data[pos++], "# sid size");
          io.data (meta_data[pos++], "# p-level size");
          // Boundary Condition sizes
          io.data (meta_data[pos++], "# eid size");   // elem id
          io.data (meta_data[pos++], "# side size "); // side number
          io.data (meta_data[pos++], "# bid size");   // boundary id
        }
    }

  // broadcast the n_elems, n_nodes, and size information
  this->comm().broadcast (meta_data);

  this->comm().broadcast (this->boundary_condition_file_name());
  this->comm().broadcast (this->subdomain_map_file_name());
  this->comm().broadcast (this->partition_map_file_name());
  this->comm().broadcast (this->polynomial_level_file_name());

  // Tell the mesh how many nodes/elements to expect.  Depending on the mesh type,
  // this may allow for efficient adding of nodes/elements.
  header_id_type n_elem  = meta_data[0];
  header_id_type n_nodes = meta_data[1];

  mesh.reserve_elem(n_elem);
  mesh.reserve_nodes(n_nodes);

  if (is_version_0_9_2)
    _field_width = meta_data[2];

  if (_field_width == 4)
    {
      uint32_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, n_elem, meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, n_nodes);

      // read the boundary conditions
      this->read_serialized_bcs (io, type_size);

      if (is_version_0_9_2)
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);
    }
  else if (_field_width == 8)
    {
      uint64_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, n_elem, meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, n_nodes);

      // read the boundary conditions
      this->read_serialized_bcs (io, type_size);

      if (is_version_0_9_2)
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);
    }


  STOP_LOG("read()","XdrIO");

  // set the node processor ids
  Partitioner::set_node_processor_ids(mesh);
}



void XdrIO::read_serialized_subdomain_names (Xdr &io)
{
  const bool read_entity_info = this->version().find("0.9.2") != std::string::npos ? true : false;
  if (read_entity_info)
    {
      MeshBase &mesh = MeshInput<MeshBase>::mesh();

      unsigned int n_subdomain_names = 0;
      std::vector<header_id_type> subdomain_ids;
      std::vector<std::string> subdomain_names;

      // Read the subdomain names
      if (this->processor_id() == 0)
        {
          io.data(n_subdomain_names);

          subdomain_ids.resize(n_subdomain_names);
          subdomain_names.resize(n_subdomain_names);

          if (n_subdomain_names)
            {
              io.data(subdomain_ids);
              io.data(subdomain_names);
            }
        }

      // Broadcast the subdomain names to all processors
      this->comm().broadcast(n_subdomain_names);
      if (n_subdomain_names == 0)
        return;

      subdomain_ids.resize(n_subdomain_names);
      subdomain_names.resize(n_subdomain_names);
      this->comm().broadcast(subdomain_ids);
      this->comm().broadcast(subdomain_names);

      // Reassemble the named subdomain information
      std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();

      for (unsigned int i=0; i<n_subdomain_names; ++i)
        subdomain_map.insert(std::make_pair(subdomain_ids[i], subdomain_names[i]));
    }
}


template <typename T>
void XdrIO::read_serialized_connectivity (Xdr &io, const dof_id_type n_elem, std::vector<header_id_type> & sizes, T)
{
  libmesh_assert (io.reading());

  if (!n_elem) return;

  const bool
    read_p_level      = ("." == this->polynomial_level_file_name()),
    read_partitioning = ("." == this->partition_map_file_name()),
    read_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  MeshBase &mesh = MeshInput<MeshBase>::mesh();

  // Keep track of what kinds of elements this file contains
  elems_of_dimension.clear();
  elems_of_dimension.resize(4, false);

  std::vector<T> conn, input_buffer(100 /* oversized ! */);

  int level=-1;

  // Version 0.9.2+ introduces unique ids
  const size_t unique_id_size_index = 3;
  const bool read_unique_id = this->version().find("0.9.2") != std::string::npos &&
    sizes[unique_id_size_index] ? true : false;

  T n_elem_at_level=0, n_processed_at_level=0;
  for (std::size_t blk=0, first_elem=0, last_elem=0;
       last_elem<n_elem; blk++)
    {
      first_elem = blk*io_blksize;
      last_elem  = std::min((blk+1)*io_blksize, std::size_t(n_elem));

      conn.clear();

      if (this->processor_id() == 0)
        for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
          {
            if (n_processed_at_level == n_elem_at_level)
              {
                // get the number of elements to read at this level
                io.data (n_elem_at_level);
                n_processed_at_level = 0;
                level++;
              }

            unsigned int pos = 0;
            // get the element type,
            io.data_stream (&input_buffer[pos++], 1);

            if (read_unique_id)
              io.data_stream (&input_buffer[pos++], 1);
            // Older versions won't have this field at all (no increment on pos)

            // maybe the parent
            if (level)
              io.data_stream (&input_buffer[pos++], 1);
            else
              // We can't always fit DofObject::invalid_id in an
              // xdr_id_type
              input_buffer[pos++] = static_cast<T>(-1);

            // maybe the processor id
            if (read_partitioning)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // maybe the subdomain id
            if (read_subdomain_id)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // maybe the p level
            if (read_p_level)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // and all the nodes
            libmesh_assert_less (pos+Elem::type_to_n_nodes_map[input_buffer[0]], input_buffer.size());
            io.data_stream (&input_buffer[pos], Elem::type_to_n_nodes_map[input_buffer[0]]);
            conn.insert (conn.end(),
                         input_buffer.begin(),
                         input_buffer.begin() + pos + Elem::type_to_n_nodes_map[input_buffer[0]]);
          }

      std::size_t conn_size = conn.size();
      this->comm().broadcast(conn_size);
      conn.resize (conn_size);
      this->comm().broadcast (conn);

      // All processors now have the connectivity for this block.
      typename std::vector<T>::const_iterator it = conn.begin();
      for (dof_id_type e=first_elem; e<last_elem; e++)
        {
          const ElemType elem_type = static_cast<ElemType>(*it); ++it;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          unique_id_type unique_id = 0;
#endif
          if (read_unique_id)
            {
#ifdef LIBMESH_ENABLE_UNIQUE_ID
              unique_id = libmesh_cast_int<unique_id_type>(*it);
#endif
              ++it;
            }
          const dof_id_type parent_id =
            (*it == static_cast<T>(-1)) ?
            DofObject::invalid_id :
            libmesh_cast_int<dof_id_type>(*it);
          ++it;
          const processor_id_type processor_id =
            libmesh_cast_int<processor_id_type>(*it);
          ++it;
          const subdomain_id_type subdomain_id =
            libmesh_cast_int<subdomain_id_type>(*it);
          ++it;
#ifdef LIBMESH_ENABLE_AMR
          const unsigned int p_level =
            libmesh_cast_int<unsigned int>(*it);
#endif
          ++it;

          Elem *parent =
            (parent_id == DofObject::invalid_id) ? NULL : mesh.elem(parent_id);

          Elem *elem = Elem::build (elem_type, parent).release();

          elem->set_id() = e;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          elem->set_unique_id() = unique_id;
#endif
          elem->processor_id() = processor_id;
          elem->subdomain_id() = subdomain_id;
#ifdef LIBMESH_ENABLE_AMR
          elem->hack_p_level(p_level);

          if (parent)
            {
              parent->add_child(elem);
              parent->set_refinement_flag (Elem::INACTIVE);
              elem->set_refinement_flag   (Elem::JUST_REFINED);
            }
#endif

          for (unsigned int n=0; n<elem->n_nodes(); n++, ++it)
            {
              const dof_id_type global_node_number = *it;

              elem->set_node(n) =
                mesh.add_point (Point(), global_node_number);
            }

          elems_of_dimension[elem->dim()] = true;
          mesh.add_elem(elem);
        }
    }

  // Set the mesh dimension to the largest encountered for an element
  for (unsigned int i=0; i!=4; ++i)
    if (elems_of_dimension[i])
      mesh.set_mesh_dimension(i);

#if LIBMESH_DIM < 3
  if (mesh.mesh_dimension() > LIBMESH_DIM)
    {
      libMesh::err << "Cannot open dimension " <<
        mesh.mesh_dimension() <<
        " mesh file when configured without " <<
        mesh.mesh_dimension() << "D support." <<
        std::endl;
      libmesh_error();
    }
#endif
}



void XdrIO::read_serialized_nodes (Xdr &io, const dof_id_type n_nodes)
{
  libmesh_assert (io.reading());

  // convenient reference to our mesh
  MeshBase &mesh = MeshInput<MeshBase>::mesh();

  if (!mesh.n_nodes()) return;

  // At this point the elements have been read from file and placeholder nodes
  // have been assigned.  These nodes, however, do not have the proper (x,y,z)
  // locations.  This method will read all the nodes from disk, and each processor
  // can then grab the individual values it needs.

  // build up a list of the nodes contained in our local mesh.  These are the nodes
  // stored on the local processor whose (x,y,z) values need to be corrected.
  std::vector<dof_id_type> needed_nodes; needed_nodes.reserve (mesh.n_nodes());
  {
    MeshBase::node_iterator
      it  = mesh.nodes_begin(),
      end = mesh.nodes_end();

    for (; it!=end; ++it)
      needed_nodes.push_back((*it)->id());

    std::sort (needed_nodes.begin(), needed_nodes.end());

    // We should not have any duplicate node->id()s
    libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
  }

  // Get the nodes in blocks.
  std::vector<Real> coords;
  std::pair<std::vector<dof_id_type>::iterator,
            std::vector<dof_id_type>::iterator> pos;
  pos.first = needed_nodes.begin();

  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
    {
      first_node = blk*io_blksize;
      last_node  = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

      coords.resize(3*(last_node - first_node));

      if (this->processor_id() == 0)
        io.data_stream (coords.empty() ? NULL : &coords[0], coords.size());

      // For large numbers of processors the majority of processors at any given
      // block may not actually need these data.  It may be worth profiling this,
      // although it is expected that disk IO will be the bottleneck
      this->comm().broadcast (coords);

      for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
        {
          // first see if we need this node.  use pos.first as a smart lower
          // bound, this will ensure that the size of the searched range
          // decreases as we match nodes.
          pos = std::equal_range (pos.first, needed_nodes.end(), n);

          if (pos.first != pos.second) // we need this node.
            {
              libmesh_assert_equal_to (*pos.first, n);
              mesh.node(n) =
                Point (coords[idx+0],
                       coords[idx+1],
                       coords[idx+2]);
            }
        }
    }
}



template <typename T>
void XdrIO::read_serialized_bcs (Xdr &io, T)
{
  if (this->boundary_condition_file_name() == "n/a") return;

  libmesh_assert (io.reading());

  // convenient reference to our mesh
  MeshBase &mesh = MeshInput<MeshBase>::mesh();

  // and our boundary info object
  BoundaryInfo &boundary_info = *mesh.boundary_info;

  // Version 0.9.2+ introduces entity names
  read_serialized_bc_names(io, boundary_info, true); // sideset names

  std::vector<DofBCData> dof_bc_data;
  std::vector<T> input_buffer;

  header_id_type n_bcs=0;
  if (this->processor_id() == 0)
    io.data (n_bcs);
  this->comm().broadcast (n_bcs);

  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
    {
      first_bc = blk*io_blksize;
      last_bc  = std::min((blk+1)*io_blksize, std::size_t(n_bcs));

      input_buffer.resize (3*(last_bc - first_bc));

      if (this->processor_id() == 0)
        io.data_stream (input_buffer.empty() ? NULL : &input_buffer[0], input_buffer.size());

      this->comm().broadcast (input_buffer);
      dof_bc_data.clear(); dof_bc_data.reserve (input_buffer.size()/3);

      // convert the input_buffer to DofBCData to facilitate searching
      for (std::size_t idx=0; idx<input_buffer.size(); idx+=3)
        dof_bc_data.push_back (DofBCData(input_buffer[idx+0],
                                         input_buffer[idx+1],
                                         input_buffer[idx+2]));
      input_buffer.clear();
      // note that while the files *we* write should already be sorted by
      // element id this is not necessarily guaranteed.
      std::sort (dof_bc_data.begin(), dof_bc_data.end());

      MeshBase::element_iterator
        it  = mesh.level_elements_begin(0),
        end = mesh.level_elements_end(0);

      // Look for BCs in this block for all the level-0 elements we have
      // (not just local ones).  Do this by finding all the entries
      // in dof_bc_data whose elem_id match the ID of the current element.
      // We cannot rely on NULL neighbors at this point since the neighbor
      // data structure has not been initialized.
      for (std::pair<std::vector<DofBCData>::iterator,
                     std::vector<DofBCData>::iterator> pos; it!=end; ++it)
#if defined(__SUNPRO_CC) || defined(__PGI)
        for (pos = std::equal_range (dof_bc_data.begin(), dof_bc_data.end(), (*it)->id(), CompareIntDofBCData());
             pos.first != pos.second; ++pos.first)
#else
        for (pos = std::equal_range (dof_bc_data.begin(), dof_bc_data.end(), (*it)->id());
             pos.first != pos.second; ++pos.first)
#endif
          {
            libmesh_assert_equal_to (pos.first->dof_id, (*it)->id());
            libmesh_assert_less (pos.first->side, (*it)->n_sides());

            boundary_info.add_side (*it, pos.first->side, pos.first->bc_id);
          }
    }
}



template <typename T>
void XdrIO::read_serialized_nodesets (Xdr &io, T)
{
  if (this->boundary_condition_file_name() == "n/a") return;

  libmesh_assert (io.reading());

  // convenient reference to our mesh
  MeshBase &mesh = MeshInput<MeshBase>::mesh();

  // and our boundary info object
  BoundaryInfo &boundary_info = *mesh.boundary_info;

  // Version 0.9.2+ introduces entity names
  read_serialized_bc_names(io, boundary_info, false); // nodeset names

  // TODO: Make a data object that works with both the element and nodal bcs
  std::vector<DofBCData> node_bc_data;
  std::vector<T> input_buffer;

  header_id_type n_nodesets=0;
  if (this->processor_id() == 0)
    io.data (n_nodesets);
  this->comm().broadcast (n_nodesets);

  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
    {
      first_bc = blk*io_blksize;
      last_bc  = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));

      input_buffer.resize (2*(last_bc - first_bc));

      if (this->processor_id() == 0)
        io.data_stream (input_buffer.empty() ? NULL : &input_buffer[0], input_buffer.size());

      this->comm().broadcast (input_buffer);
      node_bc_data.clear(); node_bc_data.reserve (input_buffer.size()/2);

      // convert the input_buffer to DofBCData to facilitate searching
      for (std::size_t idx=0; idx<input_buffer.size(); idx+=2)
        node_bc_data.push_back (DofBCData(input_buffer[idx+0],
                                          0,
                                          input_buffer[idx+1]));
      input_buffer.clear();
      // note that while the files *we* write should already be sorted by
      // node id this is not necessarily guaranteed.
      std::sort (node_bc_data.begin(), node_bc_data.end());

      MeshBase::node_iterator
        it  = mesh.nodes_begin(),
        end = mesh.nodes_end();

      // Look for BCs in this block for all nodes we have
      // (not just local ones).  Do this by finding all the entries
      // in node_bc_data whose dof_id (node_id) match the ID of the current node.
      for (std::pair<std::vector<DofBCData>::iterator,
                     std::vector<DofBCData>::iterator> pos; it!=end; ++it)
#if defined(__SUNPRO_CC) || defined(__PGI)
        for (pos = std::equal_range (node_bc_data.begin(), node_bc_data.end(), (*it)->id(), CompareIntDofBCData());
             pos.first != pos.second; ++pos.first)
#else
        for (pos = std::equal_range (node_bc_data.begin(), node_bc_data.end(), (*it)->id());
             pos.first != pos.second; ++pos.first)
#endif
          {
            // Note: dof_id from DofBCData is being used to hold node_id here
            libmesh_assert_equal_to (pos.first->dof_id, (*it)->id());

            boundary_info.add_node (*it, pos.first->bc_id);
          }
    }
}



void XdrIO::read_serialized_bc_names(Xdr &io, BoundaryInfo & info, bool is_sideset)
{
  const bool read_entity_info = this->version().find("0.9.2") != std::string::npos ? true : false;
  if (read_entity_info)
    {
      header_id_type n_boundary_names = 0;
      std::vector<header_id_type> boundary_ids;
      std::vector<std::string> boundary_names;

      // Read the boundary names
      if (this->processor_id() == 0)
        {
          io.data(n_boundary_names);

          boundary_ids.resize(n_boundary_names);
          boundary_names.resize(n_boundary_names);

          if (n_boundary_names)
            {
              io.data(boundary_ids);
              io.data(boundary_names);
            }
        }

      // Broadcast the boundary names to all processors
      this->comm().broadcast(n_boundary_names);
      if (n_boundary_names == 0)
        return;

      boundary_ids.resize(n_boundary_names);
      boundary_names.resize(n_boundary_names);
      this->comm().broadcast(boundary_ids);
      this->comm().broadcast(boundary_names);

      // Reassemble the named boundary information
      std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
        info.set_sideset_name_map() : info.set_nodeset_name_map();

      for (unsigned int i=0; i<n_boundary_names; ++i)
        boundary_map.insert(std::make_pair(boundary_ids[i], boundary_names[i]));
    }
}



void XdrIO::pack_element (std::vector<xdr_id_type> &conn, const Elem *elem,
                          const dof_id_type parent_id, const dof_id_type parent_pid) const
{
  libmesh_assert(elem);
  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);

  conn.push_back(elem->n_nodes());

  conn.push_back (elem->type());

  // In version 0.7.0+ "id" is stored but is not used.  In version 0.9.2+
  // we will store unique_id instead, therefore there is no need to
  // check for the older version when writing the unique_id.
  conn.push_back (elem->unique_id());

  if (parent_id != DofObject::invalid_id)
    {
      conn.push_back (parent_id);
      libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
      conn.push_back (parent_pid);
    }

  conn.push_back (elem->processor_id());
  conn.push_back (elem->subdomain_id());

#ifdef LIBMESH_ENABLE_AMR
  conn.push_back (elem->p_level());
#endif

  for (unsigned int n=0; n<elem->n_nodes(); n++)
    conn.push_back (elem->node(n));
}
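
// Record layout produced by pack_element() above (a sketch, restating the
// push_back calls in order):
//   [ n_nodes, type, unique_id,
//     parent_id, parent_pid,        <- only for child (refined) elements
//     processor_id, subdomain_id,
//     p_level,                      <- only when AMR is enabled
//     node_0 ... node_{n_nodes-1} ]
// write_serialized_connectivity() consumes these fields in the same order.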

} // namespace libMesh
