nemesis_io.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2014 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 // C++ includes
20 #include <numeric> // std::accumulate
21 
22 // LibMesh includes
23 #include "libmesh/elem.h"
24 #include "libmesh/exodusII_io.h"
26 #include "libmesh/nemesis_io.h"
28 #include "libmesh/node.h"
29 #include "libmesh/parallel_mesh.h"
30 #include "libmesh/parallel.h"
31 #include "libmesh/utility.h" // is_sorted, deallocate
32 #include "libmesh/boundary_info.h"
34 
35 namespace libMesh
36 {
37 
38 
39 //-----------------------------------------------
40 // anonymous namespace for implementation details
41 namespace {
// Comparison functor providing a strict weak ordering over
// (old_global_idx -> new_global_idx) pairs.  Because each key can map to
// only a single value, the ordering is determined solely by the .first
// entry of each pair.
struct CompareGlobalIdxMappings
{
  typedef std::pair<unsigned int, unsigned int> mapping_type;

  // Order two mappings by their keys (the .first entries).
  bool operator()(const mapping_type &lhs, const mapping_type &rhs) const
  {
    return lhs.first < rhs.first;
  }

  // Order a mapping against a bare key.  This overload lets algorithms
  // such as std::lower_bound search the sorted mapping vector using a
  // single integer as the lookup value.
  bool operator()(const mapping_type &lhs, const unsigned int key) const
  {
    return lhs.first < key;
  }
};
56 
57  // Nemesis & ExodusII use int for all integer values, even the ones which
58  // should never be negative. we like to use unsigned as a force of habit,
59  // this trivial little method saves some typing & also makes sure something
60  // is not horribly wrong.
// Nemesis & ExodusII store every integer quantity as a signed int, even
// those which can never legitimately be negative.  Since we habitually
// use unsigned types, this trivial helper performs the conversion while
// asserting (in debug builds) that the value survives the round trip,
// i.e. that nothing is horribly wrong with the input.
template <typename T>
inline unsigned int to_uint ( const T &t )
{
  const unsigned int converted = static_cast<unsigned int>(t);

  // A value that does not round-trip was negative or out of range.
  libmesh_assert_equal_to (t, static_cast<T>(converted));

  return converted;
}
68 
69  // test equality for a.first -> a.second mapping. since we can only map to one
70  // value only test the first entry
// Equality predicate for (old_global_idx -> new_global_idx) pairs.
// Since each key can map to only one value, two mappings are considered
// equal whenever their .first entries match; the mapped-to values are
// deliberately ignored.
inline bool global_idx_mapping_equality (const std::pair<unsigned int, unsigned int> &a,
                                         const std::pair<unsigned int, unsigned int> &b)
{
  const bool keys_match = (a.first == b.first);
  return keys_match;
}
74 }
75 
76 
77 
78 // ------------------------------------------------------------
79 // Nemesis_IO class members
// Constructor initializer list: registers the mesh with both the input
// and output base classes, marking Nemesis as a parallel format in each;
// allocates the Nemesis_IO_Helper when the Exodus and Nemesis APIs are
// both available; and initializes the timestep counter plus the verbose
// and append flags to their defaults.
// NOTE(review): the constructor signature line itself (original line 80)
// is missing from this scrape -- confirm against the upstream source.
81  MeshInput<MeshBase> (mesh, /*is_parallel_format=*/true),
82  MeshOutput<MeshBase> (mesh, /*is_parallel_format=*/true),
83  ParallelObject (mesh),
84 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
85  nemhelper(new Nemesis_IO_Helper(*this)),
86 #endif
87  _timestep(1),
88  _verbose (false),
89  _append(false)
90 {
91 }
92 
93 
// Destructor: releases the Nemesis_IO_Helper allocated in the
// constructor (only compiled in when both APIs are available).
// NOTE(review): the destructor signature lines (original lines 93-94)
// are missing from this scrape -- confirm against the upstream source.
95 {
96 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
97 
98  delete nemhelper;
99 #endif
100 }
101 
102 
103 
// Enable or disable verbose per-processor diagnostic output from this
// Nemesis_IO object (used by read() to print progress information).
104 void Nemesis_IO::verbose (bool set_verbosity)
105 {
106  _verbose = set_verbosity;
107 
108 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
109  // Set the verbose flag in the helper object
110  // as well.
// NOTE(review): the statement forwarding the flag to nemhelper (original
// line 111) is missing from this scrape -- confirm against the upstream
// source before relying on this listing.
112 #endif
113 }
114 
115 
116 
117 void Nemesis_IO::append(bool val)
118 {
119  _append = val;
120 }
121 
122 
123 
124 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
125 void Nemesis_IO::read (const std::string& base_filename)
126 {
127  // On one processor, Nemesis and ExodusII should be equivalent, so
128  // let's cowardly defer to that implementation...
129  if (this->n_processors() == 1)
130  {
131  // We can do this in one line but if the verbose flag was set in this
132  // object, it will no longer be set... thus no extra print-outs for serial runs.
133  // ExodusII_IO(this->mesh()).read (base_filename); // ambiguous when Nemesis_IO is multiply-inherited
134 
136  ExodusII_IO(mesh).read (base_filename);
137  return;
138  }
139 
140  START_LOG ("read()","Nemesis_IO");
141 
142  // This function must be run on all processors at once
143  parallel_object_only();
144 
145  if (_verbose)
146  {
147  libMesh::out << "[" << this->processor_id() << "] ";
148  libMesh::out << "Reading Nemesis file on processor: " << this->processor_id() << std::endl;
149  }
150 
151  // Construct the Nemesis filename based on the number of processors and the
152  // current processor ID.
153  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
154 
155  if (_verbose)
156  libMesh::out << "Opening file: " << nemesis_filename << std::endl;
157 
158  // Open the Exodus file in EX_READ mode
159  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/true);
160 
161  // Get a reference to the mesh. We need to be specific
162  // since Nemesis_IO is multiply-inherited
163  // MeshBase& mesh = this->mesh();
165 
166  // Local information: Read the following information from the standard Exodus header
167  // title[0]
168  // num_dim
169  // num_nodes
170  // num_elem
171  // num_elem_blk
172  // num_node_sets
173  // num_side_sets
176 
177  // Get global information: number of nodes, elems, blocks, nodesets and sidesets
179 
180  // Get "load balance" information. This includes the number of internal & border
181  // nodes and elements as well as the number of communication maps.
183 
184  // Do some error checking
186  {
187  libMesh::err << "ERROR: there should be no external nodes in an element-based partitioning!"
188  << std::endl;
189  libmesh_error();
190  }
191 
192  libmesh_assert_equal_to (nemhelper->num_nodes,
195 
196  libmesh_assert_equal_to (nemhelper->num_elem,
199 
200  libmesh_assert_less_equal (nemhelper->num_nodes, nemhelper->num_nodes_global);
201  libmesh_assert_less_equal (nemhelper->num_elem, nemhelper->num_elems_global);
202 
203  // Read nodes from the exodus file: this fills the nemhelper->x,y,z arrays.
205 
206  // Reads the nemhelper->node_num_map array, node_num_map[i] is the global node number for
207  // local node number i.
209 
210  // The get_cmap_params() function reads in the:
211  // node_cmap_ids[],
212  // node_cmap_node_cnts[],
213  // elem_cmap_ids[],
214  // elem_cmap_elem_cnts[],
216 
217  // Read the IDs of the interior, boundary, and external nodes. This function
218  // fills the vectors:
219  // node_mapi[],
220  // node_mapb[],
221  // node_mape[]
223 
224  // Read each node communication map for this processor. This function
225  // fills the vectors of vectors named:
226  // node_cmap_node_ids[][]
227  // node_cmap_proc_ids[][]
229 
230  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_cnts.size());
231  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_ids.size());
232  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_proc_ids.size());
233 
234 #ifndef NDEBUG
235  // We expect the communication maps to be symmetric - e.g. if processor i thinks it
236  // communicates with processor j, then processor j should also be expecting to
237  // communicate with i. We can assert that here easily enough with an alltoall,
238  // but let's only do it when not in optimized mode to limit unnecessary communication.
239  {
240  std::vector<unsigned char> pid_send_partner (this->n_processors(), 0);
241 
242  // strictly speaking, we should expect to communicate with ourself...
243  pid_send_partner[this->processor_id()] = 1;
244 
245  // mark each processor id we reference with a node cmap
246  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
247  {
248  libmesh_assert_less (to_uint(nemhelper->node_cmap_ids[cmap]), this->n_processors());
249 
250  pid_send_partner[nemhelper->node_cmap_ids[cmap]] = 1;
251  }
252 
253  // Copy the send pairing so we can catch the receive paring and
254  // test for equality
255  const std::vector<unsigned char> pid_recv_partner (pid_send_partner);
256 
257  this->comm().alltoall (pid_send_partner);
258 
259  libmesh_assert (pid_send_partner == pid_recv_partner);
260  }
261 #endif
262 
263  // We now have enough information to infer node ownership. We start by assuming
264  // we own all the nodes on this processor. We will then interrogate the
265  // node cmaps and see if a lower-rank processor is associated with any of
266  // our nodes. If so, then that processor owns the node, not us...
267  std::vector<unsigned short int> node_ownership (nemhelper->num_internal_nodes +
269  this->processor_id());
270 
271  // a map from processor id to cmap number, to be used later
272  std::map<unsigned int, unsigned int> pid_to_cmap_map;
273 
274  // For each node_cmap...
275  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
276  {
277  // Good time for error checking...
278  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
279  nemhelper->node_cmap_node_ids[cmap].size());
280 
281  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
282  nemhelper->node_cmap_proc_ids[cmap].size());
283 
284  // In all the samples I have seen, node_cmap_ids[cmap] is the processor
285  // rank of the remote processor...
286  const unsigned short int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];
287 
288  libmesh_assert_less (adjcnt_pid_idx, this->n_processors());
289  libmesh_assert_not_equal_to (adjcnt_pid_idx, this->processor_id());
290 
291  // We only expect one cmap per adjacent processor
292  libmesh_assert (!pid_to_cmap_map.count(adjcnt_pid_idx));
293 
294  pid_to_cmap_map[adjcnt_pid_idx] = cmap;
295 
296  // ...and each node in that cmap...
297  for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
298  {
299  // Are the node_cmap_ids and node_cmap_proc_ids really redundant?
300  libmesh_assert_equal_to (adjcnt_pid_idx, nemhelper->node_cmap_proc_ids[cmap][idx]);
301 
302  // we are expecting the exodus node numbering to be 1-based...
303  const unsigned int local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1;
304 
305  libmesh_assert_less (local_node_idx, node_ownership.size());
306 
307  // if the adjacent processor is lower rank than the current
308  // owner for this node, then it will get the node...
309  node_ownership[local_node_idx] =
310  std::min(node_ownership[local_node_idx], adjcnt_pid_idx);
311  }
312  } // We now should have established proper node ownership.
313 
314  // now that ownership is established, we can figure out how many nodes we
315  // will be responsible for numbering.
316  unsigned int num_nodes_i_must_number = 0;
317 
318  for (unsigned int idx=0; idx<node_ownership.size(); idx++)
319  if (node_ownership[idx] == this->processor_id())
320  num_nodes_i_must_number++;
321 
322  // more error checking...
323  libmesh_assert_greater_equal (num_nodes_i_must_number, to_uint(nemhelper->num_internal_nodes));
324  libmesh_assert (num_nodes_i_must_number <= to_uint(nemhelper->num_internal_nodes +
326  if (_verbose)
327  libMesh::out << "[" << this->processor_id() << "] "
328  << "num_nodes_i_must_number="
329  << num_nodes_i_must_number
330  << std::endl;
331 
332  // The call to get_loadbal_param() gets 7 pieces of information. We allgather
333  // these now across all processors to determine some global numberings. We should
334  // also gather the number of nodes each processor thinks it will number so that
335  // we can (i) determine our offset, and (ii) do some error checking.
336  std::vector<int> all_loadbal_data ( 8 );
337  all_loadbal_data[0] = nemhelper->num_internal_nodes;
338  all_loadbal_data[1] = nemhelper->num_border_nodes;
339  all_loadbal_data[2] = nemhelper->num_external_nodes;
340  all_loadbal_data[3] = nemhelper->num_internal_elems;
341  all_loadbal_data[4] = nemhelper->num_border_elems;
342  all_loadbal_data[5] = nemhelper->num_node_cmaps;
343  all_loadbal_data[6] = nemhelper->num_elem_cmaps;
344  all_loadbal_data[7] = num_nodes_i_must_number;
345 
346  this->comm().allgather (all_loadbal_data, /* identical_buffer_sizes = */ true);
347 
348  // OK, we are now in a position to request new global indices for all the nodes
349  // we do not own
350 
351  // Let's get a unique message tag to use for send()/receive()
352  Parallel::MessageTag nodes_tag = mesh.comm().get_unique_tag(12345);
353 
354  std::vector<std::vector<int> >
355  needed_node_idxs (nemhelper->num_node_cmaps); // the indices we will ask for
356 
357  std::vector<Parallel::Request>
358  needed_nodes_requests (nemhelper->num_node_cmaps);
359 
360  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
361  {
362  // We know we will need no more indices than there are nodes
363  // in this cmap, but that number is an upper bound in general
364  // since the neighboring processor associated with the cmap
365  // may not actually own it
366  needed_node_idxs[cmap].reserve (nemhelper->node_cmap_node_cnts[cmap]);
367 
368  const unsigned int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];
369 
370  // ...and each node in that cmap...
371  for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
372  {
373  const unsigned int
374  local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1,
375  owning_pid_idx = node_ownership[local_node_idx];
376 
377  // add it to the request list for its owning processor.
378  if (owning_pid_idx == adjcnt_pid_idx)
379  {
380  const unsigned int
381  global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
382  needed_node_idxs[cmap].push_back(global_node_idx);
383  }
384  }
385  // now post the send for this cmap
386  this->comm().send (adjcnt_pid_idx, // destination
387  needed_node_idxs[cmap], // send buffer
388  needed_nodes_requests[cmap], // request
389  nodes_tag);
390  } // all communication requests for getting updated global indices for border
391  // nodes have been initiated
392 
393  // Figure out how many nodes each processor thinks it will number and make sure
394  // that it adds up to the global number of nodes. Also, set up global node
395  // index offsets for each processor.
396  std::vector<unsigned int>
397  all_num_nodes_i_must_number (this->n_processors());
398 
399  for (unsigned int pid=0; pid<this->n_processors(); pid++)
400  all_num_nodes_i_must_number[pid] = all_loadbal_data[8*pid + 7];
401 
402  // The sum of all the entries in this vector should sum to the number of global nodes
403  libmesh_assert (std::accumulate(all_num_nodes_i_must_number.begin(),
404  all_num_nodes_i_must_number.end(),
405  0) == nemhelper->num_nodes_global);
406 
407  unsigned int my_next_node = 0;
408  for (unsigned int pid=0; pid<this->processor_id(); pid++)
409  my_next_node += all_num_nodes_i_must_number[pid];
410 
411  const unsigned int my_node_offset = my_next_node;
412 
413  if (_verbose)
414  libMesh::out << "[" << this->processor_id() << "] "
415  << "my_node_offset="
416  << my_node_offset
417  << std::endl;
418 
419  // Add internal nodes to the ParallelMesh, using the node ID offset we
420  // computed and the current processor's ID.
421  for (unsigned int i=0; i<to_uint(nemhelper->num_internal_nodes); ++i)
422  {
423  const unsigned int local_node_idx = nemhelper->node_mapi[i]-1;
424 #ifndef NDEBUG
425  const unsigned int global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
426  const unsigned int owning_pid_idx = node_ownership[local_node_idx];
427 #endif
428 
429  // an internal node we do not own? huh??
430  libmesh_assert_equal_to (owning_pid_idx, this->processor_id());
431  libmesh_assert_less (global_node_idx, to_uint(nemhelper->num_nodes_global));
432 
433  // "Catch" the node pointer after addition, make sure the
434  // ID matches the requested value.
435  Node* added_node =
436  mesh.add_point (Point(nemhelper->x[local_node_idx],
437  nemhelper->y[local_node_idx],
438  nemhelper->z[local_node_idx]),
439  my_next_node,
440  this->processor_id());
441 
442  // Make sure the node we added has the ID we thought it would
443  if (added_node->id() != my_next_node)
444  {
445  libMesh::err << "Error, node added with ID " << added_node->id()
446  << ", but we wanted ID " << my_next_node << std::endl;
447  }
448 
449  // update the local->global index map, when we are done
450  // it will be 0-based.
451  nemhelper->node_num_map[local_node_idx] = my_next_node++;
452  }
453 
454  // Now, for the boundary nodes... We may very well own some of them,
455  // but there may be others for which we have requested the new global
456  // id. We expect to be asked for the ids of the ones we own, so
457  // we need to create a map from the old global id to the new one
458  // we are about to create.
459  typedef std::vector<std::pair<unsigned int, unsigned int> > global_idx_mapping_type;
460  global_idx_mapping_type old_global_to_new_global_map;
461  old_global_to_new_global_map.reserve (num_nodes_i_must_number // total # i will have
462  - (my_next_node // amount i have thus far
463  - my_node_offset)); // this should be exact!
464  CompareGlobalIdxMappings global_idx_mapping_comp;
465 
466  for (unsigned int i=0; i<to_uint(nemhelper->num_border_nodes); ++i)
467  {
468  const unsigned int
469  local_node_idx = nemhelper->node_mapb[i]-1,
470  owning_pid_idx = node_ownership[local_node_idx];
471 
472  // if we own it...
473  if (owning_pid_idx == this->processor_id())
474  {
475  const unsigned int
476  global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
477 
478  // we will number it, and create a mapping from its old global index to
479  // the new global index, for lookup purposes when neighbors come calling
480  old_global_to_new_global_map.push_back(std::make_pair(global_node_idx,
481  my_next_node));
482 
483  // "Catch" the node pointer after addition, make sure the
484  // ID matches the requested value.
485  Node* added_node =
486  mesh.add_point (Point(nemhelper->x[local_node_idx],
487  nemhelper->y[local_node_idx],
488  nemhelper->z[local_node_idx]),
489  my_next_node,
490  this->processor_id());
491 
492  // Make sure the node we added has the ID we thought it would
493  if (added_node->id() != my_next_node)
494  {
495  libMesh::err << "Error, node added with ID " << added_node->id()
496  << ", but we wanted ID " << my_next_node << std::endl;
497  }
498 
499  // update the local->global index map, when we are done
500  // it will be 0-based.
501  nemhelper->node_num_map[local_node_idx] = my_next_node++;
502  }
503  }
504  // That should cover numbering all the nodes which belong to us...
505  libmesh_assert_equal_to (num_nodes_i_must_number, (my_next_node - my_node_offset));
506 
507  // Let's sort the mapping so we can efficiently answer requests
508  std::sort (old_global_to_new_global_map.begin(),
509  old_global_to_new_global_map.end(),
510  global_idx_mapping_comp);
511 
512  // and it had better be unique...
513  libmesh_assert (std::unique (old_global_to_new_global_map.begin(),
514  old_global_to_new_global_map.end(),
515  global_idx_mapping_equality) ==
516  old_global_to_new_global_map.end());
517 
518  // We can now catch incoming requests and process them. for efficiency
519  // let's do whatever is available next
520  std::map<unsigned int, std::vector<int> > requested_node_idxs; // the indices asked of us
521 
522  std::vector<Parallel::Request> requested_nodes_requests(nemhelper->num_node_cmaps);
523 
524  // We know we will receive the request from a given processor before
525  // we receive its reply to our request. However, we may receive
526  // a request and a response from one processor before getting
527  // a request from another processor. So what we are doing here
528  // is processing whatever message comes next, while recognizing
529  // we will receive a request from a processor before receiving
530  // its reply
531  std::vector<bool> processed_cmap (nemhelper->num_node_cmaps, false);
532 
533  for (unsigned int comm_step=0; comm_step<2*to_uint(nemhelper->num_node_cmaps); comm_step++)
534  {
535  // query the first message which is available
536  const Parallel::Status
538  nodes_tag));
539  const unsigned int
540  requesting_pid_idx = status.source(),
541  source_pid_idx = status.source();
542 
543  // this had better be from a processor we are expecting...
544  libmesh_assert (pid_to_cmap_map.count(requesting_pid_idx));
545 
546  // the local cmap which corresponds to the source processor
547  const unsigned int cmap = pid_to_cmap_map[source_pid_idx];
548 
549  if (!processed_cmap[cmap])
550  {
551  processed_cmap[cmap] = true;
552 
553  // we should only get one request per paired processor
554  libmesh_assert (!requested_node_idxs.count(requesting_pid_idx));
555 
556  // get a reference to the request buffer for this processor to
557  // avoid repeated map lookups
558  std::vector<int> &xfer_buf (requested_node_idxs[requesting_pid_idx]);
559 
560  // actually receive the message.
561  this->comm().receive (requesting_pid_idx, xfer_buf, nodes_tag);
562 
563  // Fill the request
564  for (unsigned int i=0; i<xfer_buf.size(); i++)
565  {
566  // the requested old global node index, *now 0-based*
567  const unsigned int old_global_node_idx = xfer_buf[i];
568 
569  // find the new global node index for the requested node -
570  // note that requesting_pid_idx thinks we own this node,
571  // so we better!
572  const global_idx_mapping_type::const_iterator it =
573  std::lower_bound (old_global_to_new_global_map.begin(),
574  old_global_to_new_global_map.end(),
575  old_global_node_idx,
576  global_idx_mapping_comp);
577 
578  libmesh_assert (it != old_global_to_new_global_map.end());
579  libmesh_assert_equal_to (it->first, old_global_node_idx);
580  libmesh_assert_greater_equal (it->second, my_node_offset);
581  libmesh_assert_less (it->second, my_next_node);
582 
583  // overwrite the requested old global node index with the new global index
584  xfer_buf[i] = it->second;
585  }
586 
587  // and send the new global indices back to the processor which asked for them
588  this->comm().send (requesting_pid_idx,
589  xfer_buf,
590  requested_nodes_requests[cmap],
591  nodes_tag);
592  } // done processing the request
593 
594  // this is the second time we have heard from this processor,
595  // so it must be its reply to our request
596  else
597  {
598  // a long time ago, we sent off our own requests. now it is time to catch the
599  // replies and get the new global node numbering. note that for any reply
600  // we receive, the corresponding nonblocking send from above *must* have been
601  // completed, since the reply is in response to that request!!
602 
603  // if we have received a reply, our send *must* have completed
604  // (note we never actually need to wait on the request)
605  libmesh_assert (needed_nodes_requests[cmap].test());
606  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_ids[cmap]), source_pid_idx);
607 
608  // now post the receive for this cmap
609  this->comm().receive (source_pid_idx,
610  needed_node_idxs[cmap],
611  nodes_tag);
612 
613  libmesh_assert_less_equal (needed_node_idxs[cmap].size(),
614  nemhelper->node_cmap_node_ids[cmap].size());
615 
616  for (unsigned int i=0,j=0; i<nemhelper->node_cmap_node_ids[cmap].size(); i++)
617  {
618  const unsigned int
619  local_node_idx = nemhelper->node_cmap_node_ids[cmap][i]-1,
620  owning_pid_idx = node_ownership[local_node_idx];
621 
622  // if this node is owned by source_pid_idx, its new global id
623  // is in the buffer we just received
624  if (owning_pid_idx == source_pid_idx)
625  {
626  libmesh_assert_less (j, needed_node_idxs[cmap].size());
627 
628  const unsigned int // now 0-based!
629  global_node_idx = needed_node_idxs[cmap][j++];
630 
631  // "Catch" the node pointer after addition, make sure the
632  // ID matches the requested value.
633  Node* added_node =
634  mesh.add_point (Point(nemhelper->x[local_node_idx],
635  nemhelper->y[local_node_idx],
636  nemhelper->z[local_node_idx]),
637  global_node_idx,
638  source_pid_idx);
639 
640  // Make sure the node we added has the ID we thought it would
641  if (added_node->id() != global_node_idx)
642  {
643  libMesh::err << "Error, node added with ID " << added_node->id()
644  << ", but we wanted ID " << global_node_idx << std::endl;
645  }
646 
647  // update the local->global index map, when we are done
648  // it will be 0-based.
649  nemhelper->node_num_map[local_node_idx] = global_node_idx;
650 
651  // we are not really going to use my_next_node again, but we can
652  // keep incrimenting it to track how many nodes we have added
653  // to the mesh
654  my_next_node++;
655  }
656  }
657  }
658  } // end of node index communication loop
659 
660  // we had better have added all the nodes we need to!
661  libmesh_assert_equal_to ((my_next_node - my_node_offset), to_uint(nemhelper->num_nodes));
662 
663  // After all that, we should be done with all node-related arrays *except* the
664  // node_num_map, which we have transformed to use our new numbering...
665  // So let's clean up the arrays we are done with.
666  {
677  Utility::deallocate (needed_node_idxs);
678  Utility::deallocate (node_ownership);
679  }
680 
681  Parallel::wait (requested_nodes_requests);
682  requested_node_idxs.clear();
683 
684  // See what the node count is up to now.
685  if (_verbose)
686  {
687  // Report the number of nodes which have been added locally
688  libMesh::out << "[" << this->processor_id() << "] ";
689  libMesh::out << "mesh.n_nodes()=" << mesh.n_nodes() << std::endl;
690 
691  // Reports the number of nodes that have been added in total.
692  libMesh::out << "[" << this->processor_id() << "] ";
693  libMesh::out << "mesh.parallel_n_nodes()=" << mesh.parallel_n_nodes() << std::endl;
694  }
695 
696 
697 
698  // --------------------------------------------------------------------------------
699  // --------------------------------------------------------------------------------
700  // --------------------------------------------------------------------------------
701 
702 
703  // We can now read in the elements...Exodus stores them in blocks in which all
704  // elements have the same geometric type. This code is adapted directly from exodusII_io.C
705 
706  // Assertion: The sum of the border and internal elements on all processors
707  // should equal nemhelper->num_elems_global
708 #ifndef NDEBUG
709  {
710  int sum_internal_elems=0, sum_border_elems=0;
711  for (unsigned int j=3,c=0; c<this->n_processors(); j+=8,++c)
712  sum_internal_elems += all_loadbal_data[j];
713 
714  for (unsigned int j=4,c=0; c<this->n_processors(); j+=8,++c)
715  sum_border_elems += all_loadbal_data[j];
716 
717  if (_verbose)
718  {
719  libMesh::out << "[" << this->processor_id() << "] ";
720  libMesh::out << "sum_internal_elems=" << sum_internal_elems << std::endl;
721 
722  libMesh::out << "[" << this->processor_id() << "] ";
723  libMesh::out << "sum_border_elems=" << sum_border_elems << std::endl;
724  }
725 
726  libmesh_assert_equal_to (sum_internal_elems+sum_border_elems, nemhelper->num_elems_global);
727  }
728 #endif
729 
730  // We need to set the mesh dimension, but the following...
731  // mesh.set_mesh_dimension(static_cast<unsigned int>(nemhelper->num_dim));
732 
733  // ... is not sufficient since some codes report num_dim==3 for two dimensional
734  // meshes living in 3D, even though all the elements are of 2D type. Therefore,
735  // we instead use the dimension of the highest element found for the Mesh dimension,
736  // similar to what is done by the Exodus reader, except here it requires a
737  // parallel communication.
738  elems_of_dimension.resize(4, false); // will use 1-based
739 
740  // Compute my_elem_offset, the amount by which to offset the local elem numbering
741  // on my processor.
742  unsigned int my_next_elem = 0;
743  for (unsigned int pid=0; pid<this->processor_id(); ++pid)
744  my_next_elem += (all_loadbal_data[8*pid + 3]+ // num_internal_elems, proc pid
745  all_loadbal_data[8*pid + 4]); // num_border_elems, proc pid
746  const unsigned int my_elem_offset = my_next_elem;
747 
748  if (_verbose)
749  libMesh::out << "[" << this->processor_id() << "] "
750  << "my_elem_offset=" << my_elem_offset << std::endl;
751 
752 
753  // Fills in the:
754  // global_elem_blk_ids[] and
755  // global_elem_blk_cnts[] arrays.
757 
758 // // Fills in the vectors
759 // // elem_mapi[num_internal_elems]
760 // // elem_mapb[num_border_elems ]
761 // // These tell which of the (locally-numbered) elements are internal and which are border elements.
762 // // In our test example these arrays are sorted (but non-contiguous), which makes it possible to
763 // // binary search for each element ID... however I don't think we need to distinguish between the
764 // // two types, since either can have nodes the boundary!
765 // nemhelper->get_elem_map();
766 
767  // Fills in the vectors of vectors:
768  // elem_cmap_elem_ids[][]
769  // elem_cmap_side_ids[][]
770  // elem_cmap_proc_ids[][]
771  // These arrays are of size num_elem_cmaps * elem_cmap_elem_cnts[i], i = 0..num_elem_cmaps
773 
774  // Get information about the element blocks:
775  // (read in the array nemhelper->block_ids[])
777 
778  // Reads the nemhelper->elem_num_map array, elem_num_map[i] is the global element number for
779  // local element number i.
781 
782  // Instantiate the ElementMaps interface. This is what translates LibMesh's
783  // element numbering scheme to Exodus's.
785 
786  // Read in the element connectivity for each block by
787  // looping over all the blocks.
788  for (unsigned int i=0; i<to_uint(nemhelper->num_elem_blk); i++)
789  {
790  // Read the information for block i: For nemhelper->block_ids[i], reads
791  // elem_type
792  // num_elem_this_blk
793  // num_nodes_per_elem
794  // num_attr
795  // connect <-- the nodal connectivity array for each element in the block.
797 
798  // Note that with parallel files it is possible we have no elements in
799  // this block!
800  if (!nemhelper->num_elem_this_blk) continue;
801 
802  // Set subdomain ID based on the block ID.
803  int subdomain_id = nemhelper->block_ids[i];
804 
805  // Create a type string (this uses the null-terminated string ctor).
806  const std::string type_str ( &(nemhelper->elem_type[0]) );
807 
808  // Set any relevant node/edge maps for this element
809  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(type_str);
810 
811  if (_verbose)
812  libMesh::out << "Reading a block of " << type_str << " elements." << std::endl;
813 
814  // Loop over all the elements in this block
815  for (unsigned int j=0; j<to_uint(nemhelper->num_elem_this_blk); j++)
816  {
817  Elem* elem = Elem::build (conv.get_canonical_type()).release();
818  libmesh_assert (elem);
819 
820  // Assign subdomain and processor ID to the newly-created Elem.
821  // Assigning the processor ID beforehand ensures that the Elem is
822  // not added as an "unpartitioned" element. Note that the element
823  // numbering in Exodus is also 1-based.
824  elem->subdomain_id() = subdomain_id;
825  elem->processor_id() = this->processor_id();
826  elem->set_id() = my_next_elem++;
827 
828  // Mark that we have seen an element of the current element's
829  // dimension.
830  elems_of_dimension[elem->dim()] = true;
831 
832  // Add the created Elem to the Mesh, catch the Elem
833  // pointer that the Mesh throws back.
834  elem = mesh.add_elem (elem);
835 
836  // We are expecting the element "thrown back" by libmesh to have the ID we specified for it...
837  // Check to see that really is the case. Note that my_next_elem was post-incremented, so
838  // subtract 1 when performing the check.
839  if (elem->id() != my_next_elem-1)
840  {
841  libMesh::err << "Unexpected ID "
842  << elem->id()
843  << " set by parallel mesh. (expecting "
844  << my_next_elem-1
845  << ")." << std::endl;
846  libmesh_error();
847  }
848 
849  // Set all the nodes for this element
850  if (_verbose)
851  libMesh::out << "[" << this->processor_id() << "] "
852  << "Setting nodes for Elem " << elem->id() << std::endl;
853 
854  for (unsigned int k=0; k<to_uint(nemhelper->num_nodes_per_elem); k++)
855  {
856  const unsigned int
857  gi = (j*nemhelper->num_nodes_per_elem + // index into connectivity array
858  conv.get_node_map(k)),
859  local_node_idx = nemhelper->connect[gi]-1, // local node index
860  global_node_idx = nemhelper->node_num_map[local_node_idx]; // new global node index
861 
862  // Set node number
863  elem->set_node(k) = mesh.node_ptr(global_node_idx);
864  }
865  } // for (unsigned int j=0; j<nemhelper->num_elem_this_blk; j++)
866  } // end for (unsigned int i=0; i<nemhelper->num_elem_blk; i++)
867 
868  libmesh_assert_equal_to ((my_next_elem - my_elem_offset), to_uint(nemhelper->num_elem));
869 
870  if (_verbose)
871  {
872  // Print local elems_of_dimension information
873  for (unsigned int i=1; i<elems_of_dimension.size(); ++i)
874  libMesh::out << "[" << this->processor_id() << "] "
875  << "elems_of_dimension[" << i << "]=" << elems_of_dimension[i] << std::endl;
876  }
877 
878  // Get the max dimension seen on the current processor
879  unsigned int max_dim_seen = 0;
880  for (unsigned int i=1; i<elems_of_dimension.size(); ++i)
881  if (elems_of_dimension[i])
882  max_dim_seen = i;
883 
884  // Do a global max to determine the max dimension seen by all processors.
885  // It should match -- I don't think we even support calculations on meshes
886  // with elements of different dimension...
887  this->comm().max(max_dim_seen);
888 
889  if (_verbose)
890  {
891  // Print the max element dimension from all processors
892  libMesh::out << "[" << this->processor_id() << "] "
893  << "max_dim_seen=" << max_dim_seen << std::endl;
894  }
895 
896  // Set the mesh dimension to the largest encountered for an element
897  mesh.set_mesh_dimension(max_dim_seen);
898 
899 #if LIBMESH_DIM < 3
900  if (mesh.mesh_dimension() > LIBMESH_DIM)
901  {
902  libMesh::err << "Cannot open dimension " <<
903  mesh.mesh_dimension() <<
904  " mesh file when configured without " <<
905  mesh.mesh_dimension() << "D support." <<
906  std::endl;
907  libmesh_error();
908  }
909 #endif
910 
911 
912  // Global sideset information, they are distributed as well, not sure if they will require communication...?
914 
915  if (_verbose)
916  {
917  libMesh::out << "[" << this->processor_id() << "] "
918  << "Read global sideset parameter information." << std::endl;
919 
920  // These global values should be the same on all processors...
921  libMesh::out << "[" << this->processor_id() << "] "
922  << "Number of global sideset IDs: " << nemhelper->global_sideset_ids.size() << std::endl;
923  }
924 
925  // Read *local* sideset info the same way it is done in
926  // exodusII_io_helper. May be called any time after
927  // nem_helper->read_header(); This sets num_side_sets and resizes
928  // elem_list, side_list, and id_list to num_elem_all_sidesets. Note
929  // that there appears to be the same number of sidesets in each file
930  // but they all have different numbers of entries (some are empty).
931  // Note that the sum of "nemhelper->num_elem_all_sidesets" over all
932  // processors should equal the sum of the entries in the "num_global_side_counts" array
933  // filled up by nemhelper->get_ss_param_global()
935 
936  if (_verbose)
937  {
938  libMesh::out << "[" << this->processor_id() << "] "
939  << "nemhelper->num_side_sets = " << nemhelper->num_side_sets << std::endl;
940 
941  libMesh::out << "[" << this->processor_id() << "] "
942  << "nemhelper->num_elem_all_sidesets = " << nemhelper->num_elem_all_sidesets << std::endl;
943  }
944 
945 #ifdef DEBUG
946  {
947  // In DEBUG mode, check that the global number of sidesets reported
948  // in each nemesis file matches the sum of all local sideset counts
949  // from each processor. This requires a small communication, so only
950  // do it in DEBUG mode.
951  int sum_num_global_side_counts = std::accumulate(nemhelper->num_global_side_counts.begin(),
953  0);
954 
955  // MPI sum up the local files contributions
956  int sum_num_elem_all_sidesets = nemhelper->num_elem_all_sidesets;
957  this->comm().sum(sum_num_elem_all_sidesets);
958 
959  if (sum_num_global_side_counts != sum_num_elem_all_sidesets)
960  {
961  libMesh::err << "Error! global side count reported by Nemesis does not "
962  << "match the side count reported by the individual files!" << std::endl;
963  libmesh_error();
964  }
965  }
966 #endif
967 
968  // Note that exodus stores sidesets in separate vectors but we want to pack
969  // them all into a single vector. So when we call read_sideset(), we pass an offset
970  // into the single vector of all IDs
971  for (int offset=0, i=0; i<nemhelper->num_side_sets; i++)
972  {
973  offset += (i > 0 ? nemhelper->num_sides_per_set[i-1] : 0); // Compute new offset
974  nemhelper->read_sideset (i, offset);
975  }
976 
977  // Now that we have the lists of elements, sides, and IDs, we are ready to set them
978  // in the BoundaryInfo object of our Mesh object. This is slightly different in parallel...
979  // For example, I think the IDs in each of the split Exodus files are numbered locally,
980  // and we have to know the appropriate ID for this processor to be able to set the
981  // entry in BoundaryInfo. This offset should be given by my_elem_offset determined in
982  // this function...
983 
984  // Debugging:
985  // Print entries of elem_list
986  // libMesh::out << "[" << this->processor_id() << "] "
987  // << "elem_list = ";
988  // for (unsigned int e=0; e<nemhelper->elem_list.size(); e++)
989  // {
990  // libMesh::out << nemhelper->elem_list[e] << ", ";
991  // }
992  // libMesh::out << std::endl;
993 
994  // Print entries of side_list
995  // libMesh::out << "[" << this->processor_id() << "] "
996  // << "side_list = ";
997  // for (unsigned int e=0; e<nemhelper->side_list.size(); e++)
998  // {
999  // libMesh::out << nemhelper->side_list[e] << ", ";
1000  // }
1001  // libMesh::out << std::endl;
1002 
1003 
1004  // Loop over the entries of the elem_list, get their pointers from the
1005  // Mesh data structure, and assign the appropriate side to the BoundaryInfo object.
1006  for (unsigned int e=0; e<nemhelper->elem_list.size(); e++)
1007  {
1008  // Calling mesh.elem() feels wrong, for example, in
1009  // ParallelMesh, Mesh::elem() can return NULL so we have to check for this...
1010  //
1011  // Perhaps we should iterate over elements and look them up in
1012  // the elem list instead? Note that the IDs in elem_list are
1013  // not necessarily in order, so if we did instead loop over the
1014  // mesh, we would have to search the (unsorted) elem_list vector
1015  // for each entry! We'll settle for doing some error checking instead.
1016  Elem* elem = mesh.elem(my_elem_offset + (nemhelper->elem_list[e]-1)/*Exodus numbering is 1-based!*/);
1017 
1018  if (elem == NULL)
1019  {
1020  libMesh::err << "Mesh returned a NULL pointer when asked for element "
1021  << my_elem_offset << " + " << nemhelper->elem_list[e] << " = " << my_elem_offset+nemhelper->elem_list[e] << std::endl;
1022  libmesh_error();
1023  }
1024 
1025  // The side numberings in libmesh and exodus are not 1:1, so we need to map
1026  // whatever side number is stored in Exodus into a libmesh side number using
1027  // a conv object...
1028  const ExodusII_IO_Helper::Conversion conv =
1029  em.assign_conversion(elem->type());
1030 
1031  // Finally, we are ready to add the element and its side to the BoundaryInfo object.
1032  // Call the version of add_side which takes a pointer, since we have already gone to
1033  // the trouble of getting said pointer...
1034  mesh.boundary_info->add_side (elem,
1035  conv.get_side_map(nemhelper->side_list[e]-1/*Exodus numbering is 1-based*/),
1036  nemhelper->id_list[e]);
1037  }
1038 
1039  // Debugging: make sure there are as many boundary conditions in the
1040  // boundary ID object as expected. Note that, at this point, the
1041  // mesh still thinks it's serial, so n_boundary_conds() returns the
1042  // local number of boundary conditions (and is therefore cheap)
1043  // which should match nemhelper->elem_list.size().
1044  {
1045  std::size_t nbcs = mesh.boundary_info->n_boundary_conds();
1046  if (nbcs != nemhelper->elem_list.size())
1047  {
1048  libMesh::err << "[" << this->processor_id() << "] ";
1049  libMesh::err << "BoundaryInfo contains "
1050  << nbcs
1051  << " boundary conditions, while the Exodus file had "
1052  << nemhelper->elem_list.size()
1053  << std::endl;
1054  libmesh_error();
1055  }
1056  }
1057 
1058  // Read global nodeset parameters? We might be able to use this to verify
1059  // something about the local files, but I haven't figured out what yet...
1061 
1062  // Read local nodeset info
1064 
1065  if (_verbose)
1066  {
1067  libMesh::out << "[" << this->processor_id() << "] ";
1068  libMesh::out << "nemhelper->num_node_sets=" << nemhelper->num_node_sets << std::endl;
1069  }
1070 
1071 // // Debugging, what is currently in nemhelper->node_num_map anyway?
1072 // libMesh::out << "[" << this->processor_id() << "] "
1073 // << "nemhelper->node_num_map = ";
1074 //
1075 // for (unsigned int i=0; i<nemhelper->node_num_map.size(); ++i)
1076 // libMesh::out << nemhelper->node_num_map[i] << ", ";
1077 // libMesh::out << std::endl;
1078 
1079  // For each nodeset,
1080  for (int nodeset=0; nodeset<nemhelper->num_node_sets; nodeset++)
1081  {
1082  // Get the user-defined ID associcated with the nodeset
1083  int nodeset_id = nemhelper->nodeset_ids[nodeset];
1084 
1085  if (_verbose)
1086  {
1087  libMesh::out << "[" << this->processor_id() << "] ";
1088  libMesh::out << "nemhelper->nodeset_ids[" << nodeset << "]=" << nodeset_id << std::endl;
1089  }
1090 
1091  // Read the nodeset from file, store them in a vector
1092  nemhelper->read_nodeset(nodeset);
1093 
1094  // Add nodes from the node_list to the BoundaryInfo object
1095  for(unsigned int node=0; node<nemhelper->node_list.size(); node++)
1096  {
1097  // Don't run past the end of our node map!
1098  if (to_uint(nemhelper->node_list[node]-1) >= nemhelper->node_num_map.size())
1099  {
1100  libMesh::err << "Error, index is past the end of node_num_map array!" << std::endl;
1101  libmesh_error();
1102  }
1103 
1104  // We should be able to use the node_num_map data structure set up previously to determine
1105  // the proper global node index.
1106  unsigned global_node_id = nemhelper->node_num_map[ nemhelper->node_list[node]-1 /*Exodus is 1-based!*/ ];
1107 
1108  if (_verbose)
1109  {
1110  libMesh::out << "[" << this->processor_id() << "] "
1111  << "nodeset " << nodeset
1112  << ", local node number: " << nemhelper->node_list[node]-1
1113  << ", global node id: " << global_node_id
1114  << std::endl;
1115  }
1116 
1117  // Add the node to the BoundaryInfo object with the proper nodeset_id
1118  mesh.boundary_info->add_node(global_node_id, nodeset_id);
1119  }
1120  }
1121 
1122  // See what the elem count is up to now.
1123  if (_verbose)
1124  {
1125  // Report the number of elements which have been added locally
1126  libMesh::out << "[" << this->processor_id() << "] ";
1127  libMesh::out << "mesh.n_elem()=" << mesh.n_elem() << std::endl;
1128 
1129  // Reports the number of elements that have been added in total.
1130  libMesh::out << "[" << this->processor_id() << "] ";
1131  libMesh::out << "mesh.parallel_n_elem()=" << mesh.parallel_n_elem() << std::endl;
1132  }
1133 
1134  STOP_LOG ("read()","Nemesis_IO");
1135 
1136  // For ParallelMesh, it seems that _is_serial is true by default. A hack to
1137  // make the Mesh think it's parallel might be to call:
1138  mesh.update_post_partitioning();
1139  mesh.delete_remote_elements();
1140 
1141  // And if that didn't work, then we're actually reading into a
1142  // SerialMesh, so forget about gathering neighboring elements
1143  if (mesh.is_serial())
1144  return;
1145 
1146  // Gather neighboring elements so that the mesh has the proper "ghost" neighbor information.
1147  MeshCommunication().gather_neighboring_elements(libmesh_cast_ref<ParallelMesh&>(mesh));
1148 }
1149 
1150 #else
1151 
1152 void Nemesis_IO::read (const std::string& )
1153 {
1154  libMesh::err << "ERROR, Nemesis API is not defined!" << std::endl;
1155  libmesh_error();
1156 }
1157 
1158 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1159 
1160 
1161 
1162 
1163 
1164 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1165 
1166 void Nemesis_IO::write (const std::string& base_filename)
1167 {
1168  // Get a constant reference to the mesh for writing
1170 
1171  // Create the filename for this processor given the base_filename passed in.
1172  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
1173 
1174  // If the user has set the append flag here, it doesn't really make
1175  // sense: the intent of this function is to write a Mesh with no
1176  // data, while "appending" is really intended to add data to an
1177  // existing file. If we're verbose, print a message to this effect.
1178  if (_append && _verbose)
1179  libMesh::out << "Warning: Appending in Nemesis_IO::write() does not make sense.\n"
1180  << "Creating a new file instead!"
1181  << std::endl;
1182 
1183  nemhelper->create(nemesis_filename);
1184 
1185  // Initialize data structures and write some global Nemesis-specifc data, such as
1186  // communication maps, to file.
1187  nemhelper->initialize(nemesis_filename,mesh);
1188 
1189  // Call the Nemesis-specialized version of write_nodal_coordinates() to write
1190  // the nodal coordinates.
1192 
1193  // Call the Nemesis-specialized version of write_elements() to write
1194  // the elements. Note: Must write a zero if a given global block ID has no
1195  // elements...
1196  nemhelper->write_elements(mesh);
1197 
1198  // Call our specialized function to write the nodesets
1199  nemhelper->write_nodesets(mesh);
1200 
1201  // Call our specialized write_sidesets() function to write the sidesets to file
1202  nemhelper->write_sidesets(mesh);
1203 
1204  // Not sure if this is really necessary, but go ahead and flush the file
1205  // once we have written all this stuff.
1207 
1208  if( (mesh.boundary_info->n_edge_conds() > 0) &&
1209  _verbose )
1210  {
1211  libMesh::out << "Warning: Mesh contains edge boundary IDs, but these "
1212  << "are not supported by the Nemesis format."
1213  << std::endl;
1214  }
1215 }
1216 
1217 #else
1218 
1219 void Nemesis_IO::write (const std::string& )
1220 {
1221  libMesh::err << "ERROR, Nemesis API is not defined!" << std::endl;
1222  libmesh_error();
1223 }
1224 
1225 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1226 
1227 
1228 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1229 
1230 void Nemesis_IO::write_timestep (const std::string& fname,
1231  const EquationSystems& es,
1232  const int timestep,
1233  const Real time)
1234 {
1235  _timestep=timestep;
1236  write_equation_systems(fname,es);
1237 
1238  nemhelper->write_timestep(timestep, time);
1239 }
1240 
1241 #else
1242 
1243 void Nemesis_IO::write_timestep (const std::string&,
1244  const EquationSystems&,
1245  const int,
1246  const Real)
1247 {
1248  libMesh::err << "ERROR, Nemesis API is not defined!" << std::endl;
1249  libmesh_error();
1250 }
1251 
1252 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1253 
1254 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1255 
// Write a nodal solution vector (with variable names) to the Nemesis file,
// creating or re-opening the file as necessary.
//
// NOTE(review): this listing is a Doxygen extraction; source lines that
// were rendered as hyperlinks (1262, 1266, 1278, 1284, 1300) were lost.
// Hedged notes below mark each gap -- confirm against upstream nemesis_io.C.
1256 void Nemesis_IO::write_nodal_data (const std::string& base_filename,
1257  const std::vector<Number>& soln,
1258  const std::vector<std::string>& names)
1259 {
1260  START_LOG("write_nodal_data()", "Nemesis_IO");
1261 
// NOTE(review): line 1262 missing -- presumably obtains the mesh reference
// ("mesh" is used below), likely via MeshOutput<MeshBase>::mesh() -- TODO confirm.
1263 
1264  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
1265 
// NOTE(review): line 1266 missing -- the condition guarding this brace;
// given the comments below it presumably tests whether the file is
// already opened for writing -- TODO confirm.
1267  {
1268  // If we're appending, open() the file with read_only=false,
1269  // otherwise create() it and write the contents of the mesh to
1270  // it.
1271  if (_append)
1272  {
1273  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/false);
1274  // After opening the file, read the header so that certain
1275  // fields, such as the number of nodes and the number of
1276  // elements, are correctly initialized for the subsequent
1277  // call to write the nodal solution.
// NOTE(review): line 1278 missing -- the header-reading call described
// by the comment above -- TODO confirm.
1279  }
1280  else
1281  {
1282  nemhelper->create(nemesis_filename);
1283  nemhelper->initialize(nemesis_filename,mesh);
// NOTE(review): line 1284 missing -- presumably the nodal-coordinate
// write (cf. Nemesis_IO::write(), which writes coordinates between
// initialize() and write_elements()) -- TODO confirm.
1285  nemhelper->write_elements(mesh);
1286  nemhelper->write_nodesets(mesh);
1287  nemhelper->write_sidesets(mesh);
1288 
1289  if( (mesh.boundary_info->n_edge_conds() > 0) &&
1290  _verbose )
1291  {
1292  libMesh::out << "Warning: Mesh contains edge boundary IDs, but these "
1293  << "are not supported by the ExodusII format."
1294  << std::endl;
1295  }
1296 
1297  // If we don't have any nodes written out on this processor,
1298  // Exodus seems to like us better if we don't try to write out any
1299  // variable names too...
// NOTE(review): line 1300 missing -- the (possibly node-count-guarded)
// nodal-variable initialization described above -- TODO confirm.
1301  }
1302  }
1303 
1304  nemhelper->write_nodal_solution(soln, names, _timestep);
1305 
1306  STOP_LOG("write_nodal_data()", "Nemesis_IO");
1307 }
1308 
1309 #else
1310 
1311 void Nemesis_IO::write_nodal_data (const std::string& ,
1312  const std::vector<Number>& ,
1313  const std::vector<std::string>& )
1314 {
1315 
1316  libMesh::err << "ERROR, Nemesis API is not defined.\n"
1317  << std::endl;
1318  libmesh_error();
1319 }
1320 
1321 
1322 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1323 
1324 
1325 
1326 
1327 
1328 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1329 
1330 void Nemesis_IO::write_global_data (const std::vector<Number>& soln,
1331  const std::vector<std::string>& names)
1332 {
1334  {
1335  libMesh::err << "ERROR, Nemesis file must be initialized "
1336  << "before outputting global variables.\n"
1337  << std::endl;
1338  libmesh_error();
1339  }
1340 
1341  // Call the Exodus writer implementation
1344 }
1345 
1346 #else
1347 
1348 void Nemesis_IO::write_global_data (const std::vector<Number>&,
1349  const std::vector<std::string>&)
1350 {
1351  libMesh::err << "ERROR, Nemesis API is not defined.\n"
1352  << std::endl;
1353  libmesh_error();
1354 }
1355 
1356 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1357 
1358 
1359 
1360 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1361 
1362 void Nemesis_IO::write_information_records (const std::vector<std::string>& records)
1363 {
1365  {
1366  libMesh::err << "ERROR, Nemesis file must be initialized "
1367  << "before outputting information records.\n"
1368  << std::endl;
1369  libmesh_error();
1370  }
1371 
1372  // Call the Exodus writer implementation
1374 }
1375 
1376 
1377 #else
1378 
1379 void Nemesis_IO::write_information_records ( const std::vector<std::string>& )
1380 {
1381 
1382  libMesh::err << "ERROR, Nemesis API is not defined.\n"
1383  << std::endl;
1384  libmesh_error();
1385 }
1386 
1387 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1388 
1389 } // namespace libMesh

Site Created By: libMesh Developers
Last modified: February 07 2014 16:57:06 UTC

Hosted By:
SourceForge.net Logo