nemesis_io_helper.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2014 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 // C++ headers
20 #include <iomanip>
21 #include <set>
22 #include <sstream>
23 
24 // Libmesh headers
26 #include "libmesh/node.h"
27 #include "libmesh/elem.h"
28 #include "libmesh/boundary_info.h"
29 #include "libmesh/parallel.h"
30 
31 #if defined(LIBMESH_HAVE_NEMESIS_API) && defined(LIBMESH_HAVE_EXODUS_API)
32 
33 namespace libMesh
34 {
35 
36 
37 // Initialize the various integer members to zero. We can check
38 // these later to see if they've been properly initialized...
39 // The parent ExodusII_IO_Helper is created with the run_only_on_proc0
40 // flag set to false, so that we can make use of its functionality
41 // on multiple processors.
43  bool verbose_in) :
44  ExodusII_IO_Helper(parent, verbose_in, /*run_only_on_proc0=*/false),
45  nemesis_err_flag(0),
46  num_nodes_global(0),
47  num_elems_global(0),
48  num_elem_blks_global(0),
49  num_node_sets_global(0),
50  num_side_sets_global(0),
51  num_proc(0),
52  num_proc_in_file(0),
53  ftype('\0'),
54  num_internal_nodes(0),
55  num_border_nodes(0),
56  num_external_nodes(0),
57  num_internal_elems(0),
58  num_border_elems(0),
59  num_node_cmaps(0),
60  num_elem_cmaps(0)
61 {
62  // Warn about using untested code!
63  libmesh_experimental();
64 }
65 
66 
68 {
69  // Our destructor is called from Nemesis_IO. We close the Exodus file here since we have
70  // responsibility for managing the file's lifetime.
71  this->ex_err = exII::ex_update(this->ex_id);
72  EX_CHECK_ERR(ex_err, "Error flushing buffers to file.");
73  this->close();
74 }
75 
76 
77 
79 {
81  Nemesis::ne_get_init_global(ex_id,
87  EX_CHECK_ERR(nemesis_err_flag, "Error reading initial global data!");
88 
89  if (verbose)
90  {
91  libMesh::out << "[" << this->processor_id() << "] " << "num_nodes_global=" << num_nodes_global << std::endl;
92  libMesh::out << "[" << this->processor_id() << "] " << "num_elems_global=" << num_elems_global << std::endl;
93  libMesh::out << "[" << this->processor_id() << "] " << "num_elem_blks_global=" << num_elem_blks_global << std::endl;
94  libMesh::out << "[" << this->processor_id() << "] " << "num_node_sets_global=" << num_node_sets_global << std::endl;
95  libMesh::out << "[" << this->processor_id() << "] " << "num_side_sets_global=" << num_side_sets_global << std::endl;
96  }
97 }
98 
99 
100 
102 {
103  if (num_side_sets_global > 0)
104  {
107 
108  // df = "distribution factor": per-entity weighting factors Exodus can attach to a set.
109  // We don't yet have a file which uses distribution factors, so we'll worry about them later...
111 
113  Nemesis::ne_get_ss_param_global(ex_id,
114  global_sideset_ids.empty() ? NULL : &global_sideset_ids[0],
115  num_global_side_counts.empty() ? NULL : &num_global_side_counts[0],
117  EX_CHECK_ERR(nemesis_err_flag, "Error reading global sideset parameters!");
118 
119  if (verbose)
120  {
121  libMesh::out << "[" << this->processor_id() << "] " << "Global Sideset IDs, Side Counts, and DF counts:" << std::endl;
122  for (unsigned int bn=0; bn<global_sideset_ids.size(); ++bn)
123  {
124  libMesh::out << " [" << this->processor_id() << "] "
125  << "global_sideset_ids["<<bn<<"]=" << global_sideset_ids[bn]
126  << ", num_global_side_counts["<<bn<<"]=" << num_global_side_counts[bn]
127  << ", num_global_side_df_counts["<<bn<<"]=" << num_global_side_df_counts[bn]
128  << std::endl;
129  }
130  }
131  }
132 }
133 
134 
135 
136 
138 {
139  if (num_node_sets_global > 0)
140  {
144 
146  Nemesis::ne_get_ns_param_global(ex_id,
147  global_nodeset_ids.empty() ? NULL : &global_nodeset_ids[0],
148  num_global_node_counts.empty() ? NULL : &num_global_node_counts[0],
150  EX_CHECK_ERR(nemesis_err_flag, "Error reading global nodeset parameters!");
151 
152  if (verbose)
153  {
154  libMesh::out << "[" << this->processor_id() << "] " << "Global Nodeset IDs, Node Counts, and DF counts:" << std::endl;
155  for (unsigned int bn=0; bn<global_nodeset_ids.size(); ++bn)
156  {
157  libMesh::out << " [" << this->processor_id() << "] "
158  << "global_nodeset_ids["<<bn<<"]=" << global_nodeset_ids[bn]
159  << ", num_global_node_counts["<<bn<<"]=" << num_global_node_counts[bn]
160  << ", num_global_node_df_counts["<<bn<<"]=" << num_global_node_df_counts[bn]
161  << std::endl;
162  }
163  }
164  }
165 }
166 
167 
168 
170 {
173 
175  Nemesis::ne_get_eb_info_global(ex_id,
176  global_elem_blk_ids.empty() ? NULL : &global_elem_blk_ids[0],
177  global_elem_blk_cnts.empty() ? NULL : &global_elem_blk_cnts[0]);
178  EX_CHECK_ERR(nemesis_err_flag, "Error reading global element block info!");
179 
180  if (verbose)
181  {
182  libMesh::out << "[" << this->processor_id() << "] " << "Global Element Block IDs and Counts:" << std::endl;
183  for (unsigned int bn=0; bn<global_elem_blk_ids.size(); ++bn)
184  {
185  libMesh::out << " [" << this->processor_id() << "] "
186  << "global_elem_blk_ids["<<bn<<"]=" << global_elem_blk_ids[bn]
187  << ", global_elem_blk_cnts["<<bn<<"]=" << global_elem_blk_cnts[bn]
188  << std::endl;
189  }
190  }
191 }
192 
193 
194 
196 {
198  Nemesis::ne_get_init_info(ex_id,
199  &num_proc,
201  &ftype);
202  EX_CHECK_ERR(nemesis_err_flag, "Error reading initial info!");
203 
204  if (verbose)
205  {
206  libMesh::out << "[" << this->processor_id() << "] " << "num_proc=" << num_proc << std::endl;
207  libMesh::out << "[" << this->processor_id() << "] " << "num_proc_in_file=" << num_proc_in_file << std::endl;
208  libMesh::out << "[" << this->processor_id() << "] " << "ftype=" << ftype << std::endl;
209  }
210 }
211 
212 
213 
215 {
217  Nemesis::ne_get_loadbal_param(ex_id,
225  this->processor_id() // The ID of the processor for which info is to be read
226  );
227  EX_CHECK_ERR(nemesis_err_flag, "Error reading load balance parameters!");
228 
229 
230  if (verbose)
231  {
232  libMesh::out << "[" << this->processor_id() << "] " << "num_internal_nodes=" << num_internal_nodes << std::endl;
233  libMesh::out << "[" << this->processor_id() << "] " << "num_border_nodes=" << num_border_nodes << std::endl;
234  libMesh::out << "[" << this->processor_id() << "] " << "num_external_nodes=" << num_external_nodes << std::endl;
235  libMesh::out << "[" << this->processor_id() << "] " << "num_internal_elems=" << num_internal_elems << std::endl;
236  libMesh::out << "[" << this->processor_id() << "] " << "num_border_elems=" << num_border_elems << std::endl;
237  libMesh::out << "[" << this->processor_id() << "] " << "num_node_cmaps=" << num_node_cmaps << std::endl;
238  libMesh::out << "[" << this->processor_id() << "] " << "num_elem_cmaps=" << num_elem_cmaps << std::endl;
239  }
240 }
241 
242 
243 
245 {
247  elem_mapb.resize(num_border_elems);
248 
250  Nemesis::ne_get_elem_map(ex_id,
251  elem_mapi.empty() ? NULL : &elem_mapi[0],
252  elem_mapb.empty() ? NULL : &elem_mapb[0],
253  this->processor_id()
254  );
255  EX_CHECK_ERR(nemesis_err_flag, "Error reading element maps!");
256 
257 
258  if (verbose)
259  {
260  // These are not contiguous ranges....
261  //libMesh::out << "[" << this->processor_id() << "] " << "first interior elem id=" << elem_mapi[0] << std::endl;
262  //libMesh::out << "[" << this->processor_id() << "] " << "last interior elem id=" << elem_mapi.back() << std::endl;
263  //libMesh::out << "[" << this->processor_id() << "] " << "first boundary elem id=" << elem_mapb[0] << std::endl;
264  //libMesh::out << "[" << this->processor_id() << "] " << "last boundary elem id=" << elem_mapb.back() << std::endl;
265  libMesh::out << "[" << this->processor_id() << "] elem_mapi[i] = ";
266  for (unsigned int i=0; i< static_cast<unsigned int>(num_internal_elems-1); ++i)
267  libMesh::out << elem_mapi[i] << ", ";
268  libMesh::out << "... " << elem_mapi.back() << std::endl;
269 
270  libMesh::out << "[" << this->processor_id() << "] elem_mapb[i] = ";
271  for (unsigned int i=0; i< static_cast<unsigned int>(std::min(10, num_border_elems-1)); ++i)
272  libMesh::out << elem_mapb[i] << ", ";
273  libMesh::out << "... " << elem_mapb.back() << std::endl;
274  }
275 }
276 
277 
278 
279 
281 {
283  node_mapb.resize(num_border_nodes);
285 
287  Nemesis::ne_get_node_map(ex_id,
288  node_mapi.empty() ? NULL : &node_mapi[0],
289  node_mapb.empty() ? NULL : &node_mapb[0],
290  node_mape.empty() ? NULL : &node_mape[0],
291  this->processor_id()
292  );
293  EX_CHECK_ERR(nemesis_err_flag, "Error reading node maps!");
294 
295  if (verbose)
296  {
297  // Remark: The Exodus/Nemesis node numbering is always (?) 1-based! This means the first interior node id will
298  // always be == 1.
299  libMesh::out << "[" << this->processor_id() << "] " << "first interior node id=" << node_mapi[0] << std::endl;
300  libMesh::out << "[" << this->processor_id() << "] " << "last interior node id=" << node_mapi.back() << std::endl;
301 
302  libMesh::out << "[" << this->processor_id() << "] " << "first boundary node id=" << node_mapb[0] << std::endl;
303  libMesh::out << "[" << this->processor_id() << "] " << "last boundary node id=" << node_mapb.back() << std::endl;
304 
305  // The number of external nodes is sometimes zero, don't try to access
306  // node_mape.back() in this case!
307  if (num_external_nodes > 0)
308  {
309  libMesh::out << "[" << this->processor_id() << "] " << "first external node id=" << node_mape[0] << std::endl;
310  libMesh::out << "[" << this->processor_id() << "] " << "last external node id=" << node_mape.back() << std::endl;
311  }
312  }
313 }
314 
315 
316 
318 {
323 
325  Nemesis::ne_get_cmap_params(ex_id,
326  node_cmap_ids.empty() ? NULL : &node_cmap_ids[0],
327  node_cmap_node_cnts.empty() ? NULL : &node_cmap_node_cnts[0],
328  elem_cmap_ids.empty() ? NULL : &elem_cmap_ids[0],
329  elem_cmap_elem_cnts.empty() ? NULL : &elem_cmap_elem_cnts[0],
330  this->processor_id());
331  EX_CHECK_ERR(nemesis_err_flag, "Error reading cmap parameters!");
332 
333 
334  if (verbose)
335  {
336  libMesh::out << "[" << this->processor_id() << "] ";
337  for (unsigned int i=0; i<node_cmap_ids.size(); ++i)
338  libMesh::out << "node_cmap_ids["<<i<<"]=" << node_cmap_ids[i] << " ";
339  libMesh::out << std::endl;
340 
341  libMesh::out << "[" << this->processor_id() << "] ";
342  for (unsigned int i=0; i<node_cmap_node_cnts.size(); ++i)
343  libMesh::out << "node_cmap_node_cnts["<<i<<"]=" << node_cmap_node_cnts[i] << " ";
344  libMesh::out << std::endl;
345 
346  libMesh::out << "[" << this->processor_id() << "] ";
347  for (unsigned int i=0; i<elem_cmap_ids.size(); ++i)
348  libMesh::out << "elem_cmap_ids["<<i<<"]=" << elem_cmap_ids[i] << " ";
349  libMesh::out << std::endl;
350 
351  libMesh::out << "[" << this->processor_id() << "] ";
352  for (unsigned int i=0; i<elem_cmap_elem_cnts.size(); ++i)
353  libMesh::out << "elem_cmap_elem_cnts["<<i<<"]=" << elem_cmap_elem_cnts[i] << " ";
354  libMesh::out << std::endl;
355  }
356 }
357 
358 
359 
361 {
364 
365  for (unsigned int i=0; i<node_cmap_node_ids.size(); ++i)
366  {
369 
371  Nemesis::ne_get_node_cmap(ex_id,
372  node_cmap_ids.empty() ? 0 : node_cmap_ids[i],
373  node_cmap_node_ids[i].empty() ? NULL : &node_cmap_node_ids[i][0],
374  node_cmap_proc_ids[i].empty() ? NULL : &node_cmap_proc_ids[i][0],
375  this->processor_id());
376  EX_CHECK_ERR(nemesis_err_flag, "Error reading node cmap node and processor ids!");
377 
378  if (verbose)
379  {
380  libMesh::out << "[" << this->processor_id() << "] node_cmap_node_ids["<<i<<"]=";
381  for (unsigned int j=0; j<node_cmap_node_ids[i].size(); ++j)
382  libMesh::out << node_cmap_node_ids[i][j] << " ";
383  libMesh::out << std::endl;
384 
385  // This is basically a vector, all entries of which are = node_cmap_ids[i]
386  // Not sure if it's always guaranteed to be that or what...
387  libMesh::out << "[" << this->processor_id() << "] node_cmap_proc_ids["<<i<<"]=";
388  for (unsigned int j=0; j<node_cmap_proc_ids[i].size(); ++j)
389  libMesh::out << node_cmap_proc_ids[i][j] << " ";
390  libMesh::out << std::endl;
391  }
392  }
393 }
394 
395 
396 
398 {
402 
403  for (unsigned int i=0; i<elem_cmap_elem_ids.size(); ++i)
404  {
408 
410  Nemesis::ne_get_elem_cmap(ex_id,
411  elem_cmap_ids.empty() ? 0 : elem_cmap_ids[i],
412  elem_cmap_elem_ids[i].empty() ? NULL : &elem_cmap_elem_ids[i][0],
413  elem_cmap_side_ids[i].empty() ? NULL : &elem_cmap_side_ids[i][0],
414  elem_cmap_proc_ids[i].empty() ? NULL : &elem_cmap_proc_ids[i][0],
415  this->processor_id());
416  EX_CHECK_ERR(nemesis_err_flag, "Error reading elem cmap elem, side, and processor ids!");
417 
418 
419  if (verbose)
420  {
421  libMesh::out << "[" << this->processor_id() << "] elem_cmap_elem_ids["<<i<<"]=";
422  for (unsigned int j=0; j<elem_cmap_elem_ids[i].size(); ++j)
423  libMesh::out << elem_cmap_elem_ids[i][j] << " ";
424  libMesh::out << std::endl;
425 
426  // These must be the (local) side IDs (in the ExodusII face numbering scheme)
427  // of the sides shared across processors.
428  libMesh::out << "[" << this->processor_id() << "] elem_cmap_side_ids["<<i<<"]=";
429  for (unsigned int j=0; j<elem_cmap_side_ids[i].size(); ++j)
430  libMesh::out << elem_cmap_side_ids[i][j] << " ";
431  libMesh::out << std::endl;
432 
433  // This is basically a vector, all entries of which are = elem_cmap_ids[i]
434  // Not sure if it's always guaranteed to be that or what...
435  libMesh::out << "[" << this->processor_id() << "] elem_cmap_proc_ids["<<i<<"]=";
436  for (unsigned int j=0; j<elem_cmap_proc_ids[i].size(); ++j)
437  libMesh::out << elem_cmap_proc_ids[i][j] << " ";
438  libMesh::out << std::endl;
439  }
440  }
441 }
442 
443 
444 
445 
446 void Nemesis_IO_Helper::put_init_info(unsigned num_proc_in,
447  unsigned num_proc_in_file_in,
448  const char* ftype_in)
449 {
451  Nemesis::ne_put_init_info(ex_id,
452  num_proc_in,
453  num_proc_in_file_in,
454  const_cast<char*>(ftype_in));
455 
456  EX_CHECK_ERR(nemesis_err_flag, "Error writing initial information!");
457 }
458 
459 
460 
461 
463  dof_id_type num_elems_global_in,
464  unsigned num_elem_blks_global_in,
465  unsigned num_node_sets_global_in,
466  unsigned num_side_sets_global_in)
467 {
469  Nemesis::ne_put_init_global(ex_id,
470  num_nodes_global_in,
471  num_elems_global_in,
472  num_elem_blks_global_in,
473  num_node_sets_global_in,
474  num_side_sets_global_in);
475 
476  EX_CHECK_ERR(nemesis_err_flag, "Error writing initial global data!");
477 }
478 
479 
480 
481 void Nemesis_IO_Helper::put_eb_info_global(std::vector<int>& global_elem_blk_ids_in,
482  std::vector<int>& global_elem_blk_cnts_in)
483 {
485  Nemesis::ne_put_eb_info_global(ex_id,
486  &global_elem_blk_ids_in[0],
487  &global_elem_blk_cnts_in[0]);
488 
489  EX_CHECK_ERR(nemesis_err_flag, "Error writing global element block information!");
490 }
491 
492 
493 
494 
495 void Nemesis_IO_Helper::put_ns_param_global(std::vector<int>& global_nodeset_ids_in,
496  std::vector<int>& num_global_node_counts_in,
497  std::vector<int>& num_global_node_df_counts_in)
498 {
499  // Only add nodesets if there are some
500  if(global_nodeset_ids.size())
501  {
503  Nemesis::ne_put_ns_param_global(ex_id,
504  &global_nodeset_ids_in[0],
505  &num_global_node_counts_in[0],
506  &num_global_node_df_counts_in[0]);
507  }
508 
509  EX_CHECK_ERR(nemesis_err_flag, "Error writing global nodeset parameters!");
510 }
511 
512 
513 
514 
515 void Nemesis_IO_Helper::put_ss_param_global(std::vector<int>& global_sideset_ids_in,
516  std::vector<int>& num_global_side_counts_in,
517  std::vector<int>& num_global_side_df_counts_in)
518 {
519  // Only add sidesets if there are some
520  if(global_sideset_ids.size())
521  {
523  Nemesis::ne_put_ss_param_global(ex_id,
524  &global_sideset_ids_in[0],
525  &num_global_side_counts_in[0],
526  &num_global_side_df_counts_in[0]);
527  }
528 
529  EX_CHECK_ERR(nemesis_err_flag, "Error writing global sideset parameters!");
530 }
531 
532 
533 
534 
535 void Nemesis_IO_Helper::put_loadbal_param(unsigned num_internal_nodes_in,
536  unsigned num_border_nodes_in,
537  unsigned num_external_nodes_in,
538  unsigned num_internal_elems_in,
539  unsigned num_border_elems_in,
540  unsigned num_node_cmaps_in,
541  unsigned num_elem_cmaps_in)
542 {
544  Nemesis::ne_put_loadbal_param(ex_id,
545  num_internal_nodes_in,
546  num_border_nodes_in,
547  num_external_nodes_in,
548  num_internal_elems_in,
549  num_border_elems_in,
550  num_node_cmaps_in,
551  num_elem_cmaps_in,
552  this->processor_id());
553 
554  EX_CHECK_ERR(nemesis_err_flag, "Error writing loadbal parameters!");
555 }
556 
557 
558 
559 
560 
561 void Nemesis_IO_Helper::put_cmap_params(std::vector<int>& node_cmap_ids_in,
562  std::vector<int>& node_cmap_node_cnts_in,
563  std::vector<int>& elem_cmap_ids_in,
564  std::vector<int>& elem_cmap_elem_cnts_in)
565 {
566  // We might not have cmaps on every processor in some corner
567  // cases
568  if(node_cmap_ids.size())
569  {
571  Nemesis::ne_put_cmap_params(ex_id,
572  &node_cmap_ids_in[0],
573  &node_cmap_node_cnts_in[0],
574  &elem_cmap_ids_in[0],
575  &elem_cmap_elem_cnts_in[0],
576  this->processor_id());
577  }
578 
579  EX_CHECK_ERR(nemesis_err_flag, "Error writing cmap parameters!");
580 }
581 
582 
583 
584 
585 void Nemesis_IO_Helper::put_node_cmap(std::vector<std::vector<int> >& node_cmap_node_ids_in,
586  std::vector<std::vector<int> >& node_cmap_proc_ids_in)
587 {
588 
589  // Print to screen what we are about to print to Nemesis file
590  if (verbose)
591  {
592  for (unsigned i=0; i<node_cmap_node_ids_in.size(); ++i)
593  {
594  libMesh::out << "[" << this->processor_id() << "] put_node_cmap() : nodes communicated to proc "
595  << this->node_cmap_ids[i]
596  << " = ";
597  for (unsigned j=0; j<node_cmap_node_ids_in[i].size(); ++j)
598  libMesh::out << node_cmap_node_ids_in[i][j] << " ";
599  libMesh::out << std::endl;
600  }
601 
602  for (unsigned i=0; i<node_cmap_node_ids_in.size(); ++i)
603  {
604  libMesh::out << "[" << this->processor_id() << "] put_node_cmap() : processor IDs = ";
605  for (unsigned j=0; j<node_cmap_proc_ids_in[i].size(); ++j)
606  libMesh::out << node_cmap_proc_ids_in[i][j] << " ";
607  libMesh::out << std::endl;
608  }
609  }
610 
611  for (unsigned int i=0; i<node_cmap_node_ids_in.size(); ++i)
612  {
614  Nemesis::ne_put_node_cmap(ex_id,
615  this->node_cmap_ids[i],
616  &node_cmap_node_ids_in[i][0],
617  &node_cmap_proc_ids_in[i][0],
618  this->processor_id());
619 
620  EX_CHECK_ERR(nemesis_err_flag, "Error writing node communication map to file!");
621  }
622 }
623 
624 
625 
626 
627 void Nemesis_IO_Helper::put_node_map(std::vector<int>& node_mapi_in,
628  std::vector<int>& node_mapb_in,
629  std::vector<int>& node_mape_in)
630 {
632  Nemesis::ne_put_node_map(ex_id,
633  node_mapi_in.empty() ? NULL : &node_mapi_in[0],
634  node_mapb_in.empty() ? NULL : &node_mapb_in[0],
635  node_mape_in.empty() ? NULL : &node_mape_in[0], // Don't take address of empty vector...
636  this->processor_id());
637 
638  EX_CHECK_ERR(nemesis_err_flag, "Error writing Nemesis internal and border node maps to file!");
639 }
640 
641 
642 
643 
644 void Nemesis_IO_Helper::put_elem_cmap(std::vector<std::vector<int> >& elem_cmap_elem_ids_in,
645  std::vector<std::vector<int> >& elem_cmap_side_ids_in,
646  std::vector<std::vector<int> >& elem_cmap_proc_ids_in)
647 {
648  for (unsigned int i=0; i<elem_cmap_ids.size(); ++i)
649  {
651  Nemesis::ne_put_elem_cmap(ex_id,
652  this->elem_cmap_ids[i],
653  &elem_cmap_elem_ids_in[i][0],
654  &elem_cmap_side_ids_in[i][0],
655  &elem_cmap_proc_ids_in[i][0],
656  this->processor_id());
657 
658  EX_CHECK_ERR(nemesis_err_flag, "Error writing elem communication map to file!");
659  }
660 }
661 
662 
663 
664 
665 void Nemesis_IO_Helper::put_elem_map(std::vector<int>& elem_mapi_in,
666  std::vector<int>& elem_mapb_in)
667 {
669  Nemesis::ne_put_elem_map(ex_id,
670  elem_mapi_in.empty() ? NULL : &elem_mapi_in[0],
671  elem_mapb_in.empty() ? NULL : &elem_mapb_in[0],
672  this->processor_id());
673 
674  EX_CHECK_ERR(nemesis_err_flag, "Error writing Nemesis internal and border element maps to file!");
675 }
676 
677 
678 
679 
680 
681 
682 void Nemesis_IO_Helper::put_n_coord(unsigned start_node_num,
683  unsigned num_nodes_in,
684  std::vector<Real>& x_coor,
685  std::vector<Real>& y_coor,
686  std::vector<Real>& z_coor)
687 {
689  Nemesis::ne_put_n_coord(ex_id,
690  start_node_num,
691  num_nodes_in,
692  &x_coor[0],
693  &y_coor[0],
694  &z_coor[0]);
695 
696  EX_CHECK_ERR(nemesis_err_flag, "Error writing coords to file!");
697 }
698 
699 
700 
701 
702 
703 
704 
705 
706 // Note: we can't reuse the ExodusII_IO_Helper code directly, since it only runs
707 // on processor 0. TODO: We could have the body of this function as a separate
708 // function and then ExodusII_IO_Helper would only call it if on processor 0...
//
// Creates (clobbering any existing file) the per-processor Exodus file that
// will hold this processor's piece of the Nemesis mesh, stores the resulting
// handle in this->ex_id, and marks the helper as opened for writing.
// Unlike the ExodusII_IO_Helper version, this runs on every processor.
709 void Nemesis_IO_Helper::create(std::string filename)
710 {
711  // Fall back on double precision when necessary since ExodusII
712  // doesn't seem to support long double
// comp_ws/io_ws are the computation and on-disk floating-point word sizes
// handed to ex_create(); capped at sizeof(double) per the comment above.
713  int comp_ws = libmesh_cast_int<int>(std::min(sizeof(Real),sizeof(double)));
714  int io_ws = libmesh_cast_int<int>(std::min(sizeof(Real),sizeof(double)));
715 
// EX_CLOBBER: silently overwrite an existing file of the same name.
716  this->ex_id = exII::ex_create(filename.c_str(), EX_CLOBBER, &comp_ws, &io_ws);
717 
// ex_create returns a negative value on failure; EX_CHECK_ERR traps that.
718  EX_CHECK_ERR(ex_id, "Error creating Nemesis mesh file.");
719 
720  if (verbose)
721  libMesh::out << "File created successfully." << std::endl;
722 
// Record that subsequent calls may legally write to this->ex_id.
723  this->opened_for_writing = true;
724 }
725 
726 
727 
728 
729 
730 
731 
732 
733 void Nemesis_IO_Helper::initialize(std::string title_in, const MeshBase & mesh)
734 {
735  // Make sure that the reference passed in is really a ParallelMesh
736  // const ParallelMesh& pmesh = libmesh_cast_ref<const ParallelMesh&>(mesh);
737  const MeshBase& pmesh = mesh;
738 
739  // According to Nemesis documentation, first call when writing should be to
740  // ne_put_init_info(). Our reader doesn't actually call this, but we should
741  // strive to be as close to a normal nemesis file as possible...
742  this->put_init_info(this->n_processors(), 1, "p");
743 
744 
745  // Gather global "initial" information for Nemesis. This consists of
746  // three parts labelled I, II, and III below...
747 
748  //
749  // I.) Need to compute the number of global element blocks. To be consistent with
750  // Exodus, we also incorrectly associate the number of element blocks with the
751  // number of libmesh subdomains...
752  //
753  this->compute_num_global_elem_blocks(pmesh);
754 
755  //
756  // II.) Determine the global number of nodesets by communication.
757  // This code relies on BoundaryInfo storing side and node
758  // boundary IDs separately at the time they are added to the
759  // BoundaryInfo object.
760  //
761  this->compute_num_global_nodesets(pmesh);
762 
763  //
764  // III.) Need to compute the global number of sidesets by communication:
765  // This code relies on BoundaryInfo storing side and node
766  // boundary IDs separately at the time they are added to the
767  // BoundaryInfo object.
768  //
769  this->compute_num_global_sidesets(pmesh);
770 
771  // Now write the global data obtained in steps I, II, and III to the Nemesis file
772  this->put_init_global(pmesh.parallel_n_nodes(),
773  pmesh.parallel_n_elem(),
774  this->num_elem_blks_global, /* I. */
775  this->num_node_sets_global, /* II. */
776  this->num_side_sets_global /* III. */
777  );
778 
779  // Next, we'll write global element block information to the file. This was already
780  // gathered in step I. above
782  this->global_elem_blk_cnts);
783 
784 
785  // Next, write global nodeset information to the file. This was already gathered in
786  // step II. above.
787  this->num_global_node_df_counts.clear();
788  this->num_global_node_df_counts.resize(this->global_nodeset_ids.size()); // distribution factors all zero...
792 
793 
794  // Next, write global sideset information to the file. This was already gathered in
795  // step III. above.
796  this->num_global_side_df_counts.clear();
797  this->num_global_side_df_counts.resize(this->global_sideset_ids.size()); // distribution factors all zero...
801 
802 
803  // Before we go any further we need to derive consistent node and
804  // element numbering schemes for all local elems and nodes connected
805  // to local elements.
806  //
807  // Must be called *after* the local_subdomain_counts map has been constructed
808  // by the compute_num_global_elem_blocks() function!
809  this->build_element_and_node_maps(pmesh);
810 
811  // Next step is to write "load balance" parameters. Several things need to
812  // be computed first though...
813 
814  // First we'll collect IDs of border nodes.
815  this->compute_border_node_ids(pmesh);
816 
817  // Next we'll collect numbers of internal and border elements, and internal nodes.
818  // Note: "A border node does not a border element make...", that is, just because one
819  // of an element's nodes has been identified as a border node, the element is not
820  // necessarily a border element. It must have a side on the boundary between processors,
821  // i.e. have a face neighbor with a different processor id...
823 
824  // Finally we are ready to write the loadbal information to the file
826  this->num_border_nodes,
827  this->num_external_nodes,
828  this->num_internal_elems,
829  this->num_border_elems,
830  this->num_node_cmaps,
831  this->num_elem_cmaps);
832 
833 
834  // Now we need to compute the "communication map" parameters. These are basically
835  // lists of nodes and elements which need to be communicated between different processors
836  // when the mesh file is read back in.
838 
839  // Write communication map parameters to file.
840  this->put_cmap_params(this->node_cmap_ids,
841  this->node_cmap_node_cnts,
842  this->elem_cmap_ids,
843  this->elem_cmap_elem_cnts);
844 
845 
846  // Ready the node communication maps. The node IDs which
847  // are communicated are the ones currently stored in
848  // proc_nodes_touched_intersections.
850 
851  // Write the packed node communication vectors to file.
852  this->put_node_cmap(this->node_cmap_node_ids,
853  this->node_cmap_proc_ids);
854 
855 
856  // Ready the node maps. These have nothing to do with communication; they map
857  // the nodes to internal, border, and external nodes in the file.
858  this->compute_node_maps();
859 
860  // Call the Nemesis API to write the node maps to file.
861  this->put_node_map(this->node_mapi,
862  this->node_mapb,
863  this->node_mape);
864 
865 
866 
867  // Ready the element communication maps. This includes border
868  // element IDs, sides which are on the border, and the processors to which
869  // they are to be communicated...
871 
872 
873 
874  // Call the Nemesis API to write the packed element communication maps vectors to file
875  this->put_elem_cmap(this->elem_cmap_elem_ids,
876  this->elem_cmap_side_ids,
877  this->elem_cmap_proc_ids);
878 
879 
880 
881 
882 
883 
884  // Ready the Nemesis element maps (internal and border) for writing to file.
885  this->compute_element_maps();
886 
887  // Call the Nemesis API to write the internal and border element IDs.
888  this->put_elem_map(this->elem_mapi,
889  this->elem_mapb);
890 
891 
892  // Now write Exodus-specific initialization information, some of which is
893  // different when you are using Nemesis.
894  this->write_exodus_initialization_info(pmesh, title_in);
895 } // end initialize()
896 
897 
898 
899 
900 
901 
903  const std::string& title_in)
904 {
905  // This follows the convention of Exodus: we always write out the mesh as LIBMESH_DIM-dimensional,
906  // even if it is 2D...
907  this->num_dim = pmesh.spatial_dimension();
908 
909  this->num_elem = static_cast<unsigned int>(std::distance (pmesh.active_local_elements_begin(),
910  pmesh.active_local_elements_end()));
911 
912  // Exodus will also use *global* number of side and node sets,
913  // though it will not write out entries for all of them...
914  this->num_side_sets =
915  libmesh_cast_int<int>(this->global_sideset_ids.size());
916  this->num_node_sets =
917  libmesh_cast_int<int>(this->global_nodeset_ids.size());
918 
919  // We need to write the global number of blocks, even though this processor might not have
920  // elements in some of them!
921  this->num_elem_blk = this->num_elem_blks_global;
922 
924  title_in.c_str(),
925  this->num_dim,
926  this->num_nodes,
927  this->num_elem,
928  this->num_elem_blk,
929  this->num_node_sets,
930  this->num_side_sets);
931 
932  EX_CHECK_ERR(ex_err, "Error initializing new Nemesis file.");
933 }
934 
935 
936 
937 
938 
940 {
941  // Make sure we don't have any leftover info
942  this->elem_mapi.clear();
943  this->elem_mapb.clear();
944 
945  // Copy set contents into vectors
946  this->elem_mapi.resize(this->internal_elem_ids.size());
947  this->elem_mapb.resize(this->border_elem_ids.size());
948 
949  {
950  unsigned cnt = 0;
951  for (std::set<unsigned>::iterator it=this->internal_elem_ids.begin();
952  it != this->internal_elem_ids.end();
953  ++it, ++cnt)
954  this->elem_mapi[cnt] = libmesh_elem_num_to_exodus[(*it)]; // + 1; // Exodus is 1-based!
955  }
956 
957  {
958  unsigned cnt = 0;
959  for (std::set<unsigned>::iterator it=this->border_elem_ids.begin();
960  it != this->border_elem_ids.end();
961  ++it, ++cnt)
962  this->elem_mapb[cnt] = libmesh_elem_num_to_exodus[(*it)]; // + 1; // Exodus is 1-based!
963  }
964 }
965 
966 
967 
// NOTE(review): the function signature (original line 968) was a hyperlink in
// the Doxygen HTML and was dropped in extraction. From the body this is
// presumably Nemesis_IO_Helper::compute_elem_communication_maps() -- confirm
// against the upstream nemesis_io_helper.C.
//
// Packs the border-element information accumulated earlier in
// this->proc_border_elem_sets into the flat elem_cmap_* vectors the Nemesis
// API expects: for each neighboring processor (one communication map each),
// parallel vectors of element IDs (translated to local Exodus numbering),
// side IDs (already Exodus-numbered upstream), and the neighbor's processor
// ID repeated once per entry. Assumes elem_cmap_ids and num_elem_cmaps were
// already filled consistently with proc_border_elem_sets.
969 {
970  // Make sure there is no leftover information
971  this->elem_cmap_elem_ids.clear();
972  this->elem_cmap_side_ids.clear();
973  this->elem_cmap_proc_ids.clear();
974 
975  // Allocate enough space for all our element maps
976  this->elem_cmap_elem_ids.resize(this->num_elem_cmaps);
977  this->elem_cmap_side_ids.resize(this->num_elem_cmaps);
978  this->elem_cmap_proc_ids.resize(this->num_elem_cmaps);
979  {
980  unsigned cnt=0; // Index into vectors
// NOTE(review): original line 981 (the opening of the for-statement iterating
// over this->proc_border_elem_sets) was dropped in extraction.
982  it != this->proc_border_elem_sets.end();
983  ++it)
984  {
985  // Make sure the current elem_cmap_id matches the index in our map of node intersections
986  libmesh_assert_equal_to ( static_cast<unsigned>(this->elem_cmap_ids[cnt]), (*it).first );
987 
988  // Get reference to the set of IDs to be packed into the vector
989  std::set<std::pair<unsigned,unsigned> >& elem_set = (*it).second;
990 
991  // Resize the vectors to receive their payload
992  this->elem_cmap_elem_ids[cnt].resize(elem_set.size());
993  this->elem_cmap_side_ids[cnt].resize(elem_set.size());
994  this->elem_cmap_proc_ids[cnt].resize(elem_set.size());
995 
996  std::set<std::pair<unsigned,unsigned> >::iterator elem_set_iter = elem_set.begin();
997 
998  // Pack the vectors with elem IDs, side IDs, and processor IDs.
999  for (unsigned j=0; j<this->elem_cmap_elem_ids[cnt].size(); ++j, ++elem_set_iter)
1000  {
1001  this->elem_cmap_elem_ids[cnt][j] = libmesh_elem_num_to_exodus[(*elem_set_iter).first];// + 1; // Elem ID, Exodus is 1-based
1002  this->elem_cmap_side_ids[cnt][j] = (*elem_set_iter).second; // Side ID, this has already been converted above
1003  this->elem_cmap_proc_ids[cnt][j] = (*it).first; // All have the same processor ID
1004  }
1005 
1006  cnt++;// increment vector index to go to next processor
1007  }
1008  } // end scope for packing
1009 }
1010 
1011 
1012 
1013 
1014 
// NOTE(review): the function signature (original line 1015) was dropped in
// extraction. From the body this is presumably
// Nemesis_IO_Helper::compute_node_maps() -- confirm against upstream source.
//
// Copies the internal and border node ID sets (computed earlier) into the
// node_mapi / node_mapb vectors, translating each libMesh node ID to its
// local Exodus ID via libmesh_node_num_to_exodus. node_mape is cleared but
// never filled here -- presumably because this writer never produces
// "external" nodes (num_external_nodes stays 0); verify against the caller.
1016 {
1017  // Make sure we don't have any leftover information
1018  this->node_mapi.clear();
1019  this->node_mapb.clear();
1020  this->node_mape.clear();
1021 
1022  // Make sure there's enough space to hold all our node IDs
1023  this->node_mapi.resize(this->internal_node_ids.size());
1024  this->node_mapb.resize(this->border_node_ids.size());
1025 
1026  // Copy set contents into vectors
1027  //
1028  // Can't use insert, since we are copying unsigned's into vector<int>...
1029  // this->node_mapi.insert(internal_node_ids.begin(), internal_node_ids.end());
1030  // this->node_mapb.insert(boundary_node_ids.begin(), boundary_node_ids.end());
1031  {
1032  unsigned cnt = 0;
1033  for (std::set<unsigned>::iterator it=this->internal_node_ids.begin();
1034  it != this->internal_node_ids.end();
1035  ++it, ++cnt)
1036  this->node_mapi[cnt] = this->libmesh_node_num_to_exodus[*it];// + 1; // Exodus is 1-based!
1037  }
1038 
1039  {
1040  unsigned cnt=0;
1041  for (std::set<unsigned>::iterator it=this->border_node_ids.begin();
1042  it != this->border_node_ids.end();
1043  ++it, ++cnt)
1044  this->node_mapb[cnt] = this->libmesh_node_num_to_exodus[*it];// + 1; // Exodus is 1-based!
1045  }
1046 }
1047 
1048 
1049 
1050 
1051 
// NOTE(review): the function signature (original line 1052) was dropped in
// extraction. From the body this is presumably
// Nemesis_IO_Helper::compute_node_communication_maps() -- confirm upstream.
//
// Packs the per-processor shared-node sets (proc_nodes_touched_intersections)
// into the flat node_cmap_node_ids / node_cmap_proc_ids vectors expected by
// the Nemesis API: for each neighboring processor, the shared node IDs
// (translated to local Exodus numbering) and that processor's ID repeated
// once per node. Assumes node_cmap_ids and num_node_cmaps are already set.
1053 {
1054  // Make sure there's no left-over information
1055  this->node_cmap_node_ids.clear();
1056  this->node_cmap_proc_ids.clear();
1057 
1058  // Allocate enough space for all our node maps
1059  this->node_cmap_node_ids.resize(this->num_node_cmaps);
1060  this->node_cmap_proc_ids.resize(this->num_node_cmaps);
1061  {
1062  unsigned cnt=0; // Index into vectors
// NOTE(review): original line 1063 (the opening of the for-statement iterating
// over this->proc_nodes_touched_intersections) was dropped in extraction.
1064  it != this->proc_nodes_touched_intersections.end();
1065  ++it)
1066  {
1067  // Make sure the current node_cmap_id matches the index in our map of node intersections
1068  libmesh_assert_equal_to ( static_cast<unsigned>(this->node_cmap_ids[cnt]), (*it).first );
1069 
1070  // Get reference to the set of IDs to be packed into the vector.
1071  std::set<unsigned>& node_set = (*it).second;
1072 
1073  //libMesh::out << "[" << this->processor_id() << "] node_set.size()=" << node_set.size() << std::endl;
1074 
1075  // Resize the vectors to receive their payload
1076  this->node_cmap_node_ids[cnt].resize(node_set.size());
1077  this->node_cmap_proc_ids[cnt].resize(node_set.size());
1078 
1079  std::set<unsigned>::iterator node_set_iter = node_set.begin();
1080 
1081  // Pack the vectors with node IDs and processor IDs.
1082  for (unsigned j=0; j<this->node_cmap_node_ids[cnt].size(); ++j, ++node_set_iter)
1083  {
1084  this->node_cmap_node_ids[cnt][j] = this->libmesh_node_num_to_exodus[*node_set_iter];//(*node_set_iter) + 1; // Exodus is 1-based
1085  this->node_cmap_proc_ids[cnt][j] = (*it).first;
1086  }
1087 
1088  cnt++;// increment vector index to go to next processor
1089  }
1090  } // end scope for packing
1091 
1092  // if (verbose)
1093  // libMesh::out << "Done packing." << std::endl;
1094 
1095  // Print out the vectors we just packed
1096  if (verbose)
1097  {
1098  for (unsigned i=0; i<this->node_cmap_node_ids.size(); ++i)
1099  {
1100  libMesh::out << "[" << this->processor_id() << "] nodes communicated to proc "
1101  << this->node_cmap_ids[i]
1102  << " = ";
1103  for (unsigned j=0; j<this->node_cmap_node_ids[i].size(); ++j)
1104  libMesh::out << this->node_cmap_node_ids[i][j] << " ";
1105  libMesh::out << std::endl;
1106  }
1107 
1108  for (unsigned i=0; i<this->node_cmap_node_ids.size(); ++i)
1109  {
1110  libMesh::out << "[" << this->processor_id() << "] processor ID node communicated to = ";
1111  for (unsigned j=0; j<this->node_cmap_proc_ids[i].size(); ++j)
1112  libMesh::out << this->node_cmap_proc_ids[i][j] << " ";
1113  libMesh::out << std::endl;
1114  }
1115  }
1116 }
1117 
1118 
1119 
1120 
// NOTE(review): the function signature (original line 1121) was dropped in
// extraction. From the body this routine fills the per-communication-map
// count/ID parameter vectors (node_cmap_ids/node_cmap_node_cnts and
// elem_cmap_ids/elem_cmap_elem_cnts) -- likely
// Nemesis_IO_Helper::compute_communication_map_parameters(); confirm upstream.
//
// For nodes: one entry per neighboring processor in
// proc_nodes_touched_intersections (ID of the neighbor, and how many nodes
// are shared with it). For elements: the analogous data taken from
// proc_border_elem_sets. Both loops rely on num_node_cmaps/num_elem_cmaps
// matching the respective map sizes.
1122 {
1123  // For the nodes, these are the number of entries in the sets in proc_nodes_touched_intersections
1124  // map computed above. Note: this map does not contain self-intersections so we can loop over it
1125  // directly.
1126  this->node_cmap_node_cnts.clear(); // Make sure we don't have any leftover information...
1127  this->node_cmap_ids.clear(); // Make sure we don't have any leftover information...
1128  this->node_cmap_node_cnts.resize(this->num_node_cmaps);
1129  this->node_cmap_ids.resize(this->num_node_cmaps);
1130 
1131  {
1132  unsigned cnt=0; // Index into the vector
// NOTE(review): original line 1133 (the opening of the for-statement iterating
// over this->proc_nodes_touched_intersections) was dropped in extraction.
1134  it != this->proc_nodes_touched_intersections.end();
1135  ++it)
1136  {
1137  this->node_cmap_ids[cnt] = (*it).first; // The ID of the proc we communicate with
1138  this->node_cmap_node_cnts[cnt] = (*it).second.size(); // The number of nodes we communicate
1139  cnt++; // increment vector index!
1140  }
1141  }
1142 
1143  // Print the packed vectors we just filled
1144  if (verbose)
1145  {
1146  libMesh::out << "[" << this->processor_id() << "] node_cmap_node_cnts = ";
1147  for (unsigned i=0; i<node_cmap_node_cnts.size(); ++i)
1148  libMesh::out << node_cmap_node_cnts[i] << ", ";
1149  libMesh::out << std::endl;
1150 
1151  libMesh::out << "[" << this->processor_id() << "] node_cmap_ids = ";
1152  for (unsigned i=0; i<node_cmap_ids.size(); ++i)
1153  libMesh::out << node_cmap_ids[i] << ", ";
1154  libMesh::out << std::endl;
1155  }
1156 
1157  // For the elements, we have not yet computed all this information..
1158  this->elem_cmap_elem_cnts.clear(); // Make sure we don't have any leftover information...
1159  this->elem_cmap_ids.clear(); // Make sure we don't have any leftover information...
1160  this->elem_cmap_elem_cnts.resize(this->num_elem_cmaps);
1161  this->elem_cmap_ids.resize(this->num_elem_cmaps);
1162 
1163  // Pack the elem_cmap_ids and elem_cmap_elem_cnts vectors
1164  {
1165  unsigned cnt=0; // Index into the vectors we're filling
1166  for (proc_border_elem_sets_iterator it = this->proc_border_elem_sets.begin();
1167  it != this->proc_border_elem_sets.end();
1168  ++it)
1169  {
1170  this->elem_cmap_ids[cnt] = (*it).first; // The ID of the proc we communicate with
1171  this->elem_cmap_elem_cnts[cnt] = (*it).second.size(); // The number of elems we communicate to/from that proc
1172  cnt++; // increment vector index!
1173  }
1174  }
1175 
1176  // Print the packed vectors we just filled
1177  if (verbose)
1178  {
1179  libMesh::out << "[" << this->processor_id() << "] elem_cmap_elem_cnts = ";
1180  for (unsigned i=0; i<elem_cmap_elem_cnts.size(); ++i)
1181  libMesh::out << elem_cmap_elem_cnts[i] << ", ";
1182  libMesh::out << std::endl;
1183 
1184  libMesh::out << "[" << this->processor_id() << "] elem_cmap_ids = ";
1185  for (unsigned i=0; i<elem_cmap_ids.size(); ++i)
1186  libMesh::out << elem_cmap_ids[i] << ", ";
1187  libMesh::out << std::endl;
1188  }
1189 }
1190 
1191 
1192 
1193 
// NOTE(review): original line 1195 (the qualified function name line) was
// dropped in extraction; only the "void" return-type line survives. From the
// body this is presumably
// Nemesis_IO_Helper::compute_internal_and_border_elems_and_internal_nodes(const MeshBase&)
// -- confirm against upstream source.
//
// Walks the active local elements once to classify them as "border" (having a
// neighbor owned by a different processor) or "internal", recording for each
// border element the neighboring processor and the Nemesis-numbered side on
// the inter-processor boundary (proc_border_elem_sets). Also collects every
// node touched by a local element, then derives internal_elem_ids and
// internal_node_ids by set_difference against the border sets, and stores the
// resulting counts for the Nemesis file header. Assumes border_node_ids was
// computed beforehand.
1194 void
1196 {
1197  // Set of all local, active element IDs. After we have identified border element
1198  // IDs, the set_difference between this set and the border_elem_ids set will give us
1199  // the set of internal_elem_ids.
1200  std::set<unsigned> all_elem_ids;
1201 
1202  // A set of processor IDs which elements on this processor have as
1203  // neighbors. The size of this set will determine the number of
1204  // element communication maps in Exodus.
1205  std::set<unsigned> neighboring_processor_ids;
1206 
1207  // Will be used to create conversion objects capable of mapping libmesh
1208  // element numberings into Nemesis numberings.
1209  ExodusII_IO_Helper::ElementMaps element_mapper;
1210 
// NOTE(review): original lines 1211-1212 (presumably the declarations of the
// elem_it / elem_end active-local-element iterators used below) were dropped
// in extraction.
1213 
1214  for (; elem_it != elem_end; ++elem_it)
1215  {
1216  const Elem* elem = *elem_it;
1217 
1218  // Add this Elem's ID to all_elem_ids, later we will take the difference
1219  // between this set and the set of border_elem_ids, to get the set of
1220  // internal_elem_ids.
1221  all_elem_ids.insert(elem->id());
1222 
1223  // Will be set to true if element is determined to be a border element
1224  bool is_border_elem = false;
1225 
1226  // Construct a conversion object for this Element. This will help us map
1227  // Libmesh numberings into Nemesis numberings for sides.
1228  ExodusII_IO_Helper::Conversion conv = element_mapper.assign_conversion(elem->type());
1229 
1230  // Add all this element's node IDs to the set of all node IDs.
1231  // The set of internal_node_ids will be the set difference between
1232  // the set of all nodes and the set of border nodes.
1233  //
1234  // In addition, if any node of a local node is listed in the
1235  // border nodes list, then this element goes into the proc_border_elem_sets.
1236  // Note that there is not a 1:1 correspondence between
1237  // border_elem_ids and the entries which go into proc_border_elem_sets.
1238  // The latter is for communication purposes, ie determining which elements
1239  // should be shared between processors.
1240  for (unsigned int node=0; node<elem->n_nodes(); ++node)
1241  {
1242  this->nodes_attached_to_local_elems.insert(elem->node(node));
1243  } // end loop over element's nodes
1244 
1245  // Loop over element's neighbors, see if it has a neighbor which is off-processor
1246  for (unsigned int n=0; n<elem->n_neighbors(); ++n)
1247  {
1248  if (elem->neighbor(n) != NULL)
1249  {
1250  unsigned neighbor_proc_id = elem->neighbor(n)->processor_id();
1251 
1252  // If my neighbor has a different processor ID, I must be a border element.
1253  // Also track the neighboring processor ID if it is are different from our processor ID
1254  if (neighbor_proc_id != this->processor_id())
1255  {
1256  is_border_elem = true;
1257  neighboring_processor_ids.insert(neighbor_proc_id);
1258 
1259  // Convert libmesh side(n) of this element into a side ID for Nemesis
1260  unsigned nemesis_side_id = conv.get_inverse_side_map(n);
1261 
1262  if (verbose)
1263  libMesh::out << "[" << this->processor_id() << "] LibMesh side "
1264  << n
1265  << " mapped to (1-based) Exodus side "
1266  << nemesis_side_id
1267  << std::endl;
1268 
1269  // Add this element's ID and the ID of the side which is on the boundary
1270  // to the set of border elements for this processor.
1271  // Note: if the set does not already exist, this creates it.
1272  this->proc_border_elem_sets[ neighbor_proc_id ].insert( std::make_pair(elem->id(), nemesis_side_id) );
1273  }
1274  }
1275  } // end for loop over neighbors
1276 
1277  // If we're on a border element, add it to the set
1278  if (is_border_elem)
1279  this->border_elem_ids.insert( elem->id() );
1280 
1281  } // end for loop over active local elements
1282 
1283  // Take the set_difference between all elements and border elements to get internal
1284  // element IDs
1285  std::set_difference(all_elem_ids.begin(), all_elem_ids.end(),
1286  this->border_elem_ids.begin(), this->border_elem_ids.end(),
1287  std::inserter(this->internal_elem_ids, this->internal_elem_ids.end()));
1288 
1289  // Take the set_difference between all nodes and border nodes to get internal nodes
1290  std::set_difference(this->nodes_attached_to_local_elems.begin(), this->nodes_attached_to_local_elems.end(),
1291  this->border_node_ids.begin(), this->border_node_ids.end(),
1292  std::inserter(this->internal_node_ids, this->internal_node_ids.end()));
1293 
1294  if (verbose)
1295  {
1296  libMesh::out << "[" << this->processor_id() << "] neighboring_processor_ids = ";
1297  for (std::set<unsigned>::iterator it = neighboring_processor_ids.begin();
1298  it != neighboring_processor_ids.end();
1299  ++it)
1300  {
1301  libMesh::out << *it << " ";
1302  }
1303  libMesh::out << std::endl;
1304  }
1305 
1306  // The size of the neighboring_processor_ids set should be the number of element communication maps
1307  this->num_elem_cmaps = neighboring_processor_ids.size();
1308 
1309  if (verbose)
1310  libMesh::out << "[" << this->processor_id() << "] "
1311  << "Number of neighboring processor IDs="
1312  << this->num_elem_cmaps
1313  << std::endl;
1314 
1315  if (verbose)
1316  {
1317  // Print out counts of border elements for each processor
// NOTE(review): original line 1318 (the opening of the for-statement iterating
// over this->proc_border_elem_sets) was dropped in extraction.
1319  it != this->proc_border_elem_sets.end(); ++it)
1320  {
1321  libMesh::out << "[" << this->processor_id() << "] "
1322  << "Proc "
1323  << (*it).first << " communicates "
1324  << (*it).second.size() << " elements." << std::endl;
1325  }
1326  }
1327 
1328  // Store the number of internal and border elements, and the number of internal nodes,
1329  // to be written to the Nemesis file.
1330  this->num_internal_elems = this->internal_elem_ids.size();
1331  this->num_border_elems = this->border_elem_ids.size();
1332  this->num_internal_nodes = this->internal_node_ids.size();
1333 
1334  if (verbose)
1335  {
1336  libMesh::out << "[" << this->processor_id() << "] num_internal_nodes=" << this->num_internal_nodes << std::endl;
1337  libMesh::out << "[" << this->processor_id() << "] num_border_nodes=" << this->num_border_nodes << std::endl;
1338  libMesh::out << "[" << this->processor_id() << "] num_border_elems=" << this->num_border_elems << std::endl;
1339  libMesh::out << "[" << this->processor_id() << "] num_internal_elems=" << this->num_internal_elems << std::endl;
1340  }
1341 }
1342 
1343 
1344 
// NOTE(review): the function signature (original line 1345) was dropped in
// extraction. From the body this is presumably
// Nemesis_IO_Helper::compute_num_global_sidesets(const MeshBase& pmesh) --
// confirm against upstream source.
//
// Builds the global list of sideset IDs (local IDs unioned across all
// processors via this->comm().set_union) and the global per-sideset side
// counts. To avoid double counting in the parallel sum, non-local sides are
// first removed from the build_side_list output using a swap-to-end /
// erase idiom, then each sideset ID's local occurrences are counted and
// summed across processors.
1346 {
1347  std::set<boundary_id_type> local_side_boundary_ids;
1348 
1349  // 1.) Get reference to the set of side boundary IDs
1350  std::set<boundary_id_type> global_side_boundary_ids
1351  (pmesh.boundary_info->get_side_boundary_ids().begin(),
1352  pmesh.boundary_info->get_side_boundary_ids().end());
1353 
1354  // Save this set of local boundary side IDs for later
1355  local_side_boundary_ids = global_side_boundary_ids;
1356 
1357  // 2.) Gather boundary side IDs from other processors
1358  this->comm().set_union(global_side_boundary_ids);
1359 
1360  // 3.) Now global_side_boundary_ids actually contains a global list of all side boundary IDs
1361  this->num_side_sets_global = global_side_boundary_ids.size();
1362 
1363  // 4.) Pack these sidesets into a vector so they can be written by Nemesis
1364  this->global_sideset_ids.clear(); // Make sure there is no leftover information
1365  this->global_sideset_ids.insert(this->global_sideset_ids.end(),
1366  global_side_boundary_ids.begin(),
1367  global_side_boundary_ids.end());
1368 
1369  if (verbose)
1370  {
1371  libMesh::out << "[" << this->processor_id() << "] global_sideset_ids = ";
1372  for (unsigned i=0; i<this->global_sideset_ids.size(); ++i)
1373  libMesh::out << this->global_sideset_ids[i] << ", ";
1374  libMesh::out << std::endl;
1375  }
1376 
1377  // We also need global counts of sides in each of the sidesets. Again, there may be a
1378  // better way to do this...
1379  std::vector<dof_id_type> bndry_elem_list;
1380  std::vector<unsigned short int> bndry_side_list;
1381  std::vector<boundary_id_type> bndry_id_list;
1382  pmesh.boundary_info->build_side_list(bndry_elem_list, bndry_side_list, bndry_id_list);
1383 
1384  // Similarly to the nodes, we can't count any sides for elements which aren't local
1385  std::vector<dof_id_type>::iterator it_elem=bndry_elem_list.begin();
1386  std::vector<unsigned short>::iterator it_side=bndry_side_list.begin();
1387  std::vector<boundary_id_type>::iterator it_id=bndry_id_list.begin();
1388 
1389  // New end iterators, to be updated as we find non-local IDs
1390  std::vector<dof_id_type>::iterator new_bndry_elem_list_end = bndry_elem_list.end();
1391  std::vector<unsigned short>::iterator new_bndry_side_list_end = bndry_side_list.end();
1392  std::vector<boundary_id_type>::iterator new_bndry_id_list_end = bndry_id_list.end();
1393 
1394  for ( ; it_elem != new_bndry_elem_list_end; )
1395  {
1396  if (pmesh.elem( *it_elem )->processor_id() != this->processor_id())
1397  {
1398  // Back up the new end iterators to prepare for swap
1399  --new_bndry_elem_list_end;
1400  --new_bndry_side_list_end;
1401  --new_bndry_id_list_end;
1402 
1403  // Swap places, the non-local elem will now be "past-the-end"
1404  std::swap (*it_elem, *new_bndry_elem_list_end);
1405  std::swap (*it_side, *new_bndry_side_list_end);
1406  std::swap (*it_id, *new_bndry_id_list_end);
1407  }
1408  else // elem is local, go to next
1409  {
1410  ++it_elem;
1411  ++it_side;
1412  ++it_id;
1413  }
1414  }
1415 
1416  // Erase from "new" end to old end on each vector.
1417  bndry_elem_list.erase(new_bndry_elem_list_end, bndry_elem_list.end());
1418  bndry_side_list.erase(new_bndry_side_list_end, bndry_side_list.end());
1419  bndry_id_list.erase(new_bndry_id_list_end, bndry_id_list.end());
1420 
1421  this->num_global_side_counts.clear(); // Make sure we don't have any leftover information
1422  this->num_global_side_counts.resize(this->global_sideset_ids.size());
1423 
1424  // Get the count for each global sideset ID
1425  for (unsigned i=0; i<global_sideset_ids.size(); ++i)
1426  {
1427  this->num_global_side_counts[i] = std::count(bndry_id_list.begin(),
1428  bndry_id_list.end(),
1429  this->global_sideset_ids[i]);
1430  }
1431 
1432  if (verbose)
1433  {
1434  libMesh::out << "[" << this->processor_id() << "] num_global_side_counts = ";
1435  for (unsigned i=0; i<this->num_global_side_counts.size(); ++i)
1436  libMesh::out << this->num_global_side_counts[i] << ", ";
1437  libMesh::out << std::endl;
1438  }
1439 
1440  // Finally sum up the result
1441  this->comm().sum(this->num_global_side_counts);
1442 
1443  if (verbose)
1444  {
1445  libMesh::out << "[" << this->processor_id() << "] num_global_side_counts = ";
1446  for (unsigned i=0; i<this->num_global_side_counts.size(); ++i)
1447  libMesh::out << this->num_global_side_counts[i] << ", ";
1448  libMesh::out << std::endl;
1449  }
1450 }
1451 
1452 
1453 
1454 
1455 
1456 
// NOTE(review): the function signature (original line 1457) was dropped in
// extraction. From the body this is presumably
// Nemesis_IO_Helper::compute_num_global_nodesets(const MeshBase& pmesh) --
// confirm against upstream source.
//
// Analogous to the sideset routine above: unions the local nodeset IDs
// across processors (set_union), stores the global nodeset ID list and
// count, then computes per-nodeset global node counts. Only nodes *owned*
// by this processor are counted locally (non-local entries are swapped to
// the end and erased) so the final this->comm().sum() has no duplicates.
1458 {
1459  std::set<boundary_id_type> local_node_boundary_ids;
1460 
1461  // 1.) Get reference to the set of node boundary IDs *for this processor*
1462  std::set<boundary_id_type> global_node_boundary_ids
1463  (pmesh.boundary_info->get_node_boundary_ids().begin(),
1464  pmesh.boundary_info->get_node_boundary_ids().end());
1465 
1466  // Save a copy of the local_node_boundary_ids...
1467  local_node_boundary_ids = global_node_boundary_ids;
1468 
1469  // 2.) Gather boundary node IDs from other processors
1470  this->comm().set_union(global_node_boundary_ids);
1471 
1472  // 3.) Now global_node_boundary_ids actually contains a global list of all node boundary IDs
1473  this->num_node_sets_global = global_node_boundary_ids.size();
1474 
1475  // 4.) Create a vector<int> from the global_node_boundary_ids set
1476  this->global_nodeset_ids.clear();
1477  this->global_nodeset_ids.insert(this->global_nodeset_ids.end(),
1478  global_node_boundary_ids.begin(),
1479  global_node_boundary_ids.end());
1480 
1481  if (verbose)
1482  {
1483  libMesh::out << "[" << this->processor_id() << "] global_nodeset_ids = ";
1484  for (unsigned i=0; i<global_nodeset_ids.size(); ++i)
1485  libMesh::out << global_nodeset_ids[i] << ", ";
1486  libMesh::out << std::endl;
1487 
1488  libMesh::out << "[" << this->processor_id() << "] local_node_boundary_ids = ";
1489  for (std::set<boundary_id_type>::iterator it = local_node_boundary_ids.begin();
1490  it != local_node_boundary_ids.end();
1491  ++it)
1492  libMesh::out << *it << ", ";
1493  libMesh::out << std::endl;
1494  }
1495 
1496  // 7.) We also need to know the number of nodes which is in each of the nodesets, globally.
1497  // There is probably a better way to do this...
1498  std::vector<dof_id_type> boundary_node_list;
1499  std::vector<boundary_id_type> boundary_node_boundary_id_list;
1500  pmesh.boundary_info->build_node_list(boundary_node_list, boundary_node_boundary_id_list);
1501 
1502  if (verbose)
1503  {
1504  libMesh::out << "[" << this->processor_id() << "] boundary_node_list.size()="
1505  << boundary_node_list.size() << std::endl;
1506  libMesh::out << "[" << this->processor_id() << "] (boundary_node_id, boundary_id) = ";
1507  for (unsigned i=0; i<boundary_node_list.size(); ++i)
1508  {
1509  libMesh::out << "(" << boundary_node_list[i] << ", " << boundary_node_boundary_id_list[i] << ") ";
1510  }
1511  libMesh::out << std::endl;
1512  }
1513 
1514  // Now get the global information. In this case, we only want to count boundary
1515  // information for nodes *owned* by this processor, so there are no duplicates.
1516 
1517  // Make sure we don't have any left over information
1518  this->num_global_node_counts.clear();
1519  this->num_global_node_counts.resize(this->global_nodeset_ids.size());
1520 
1521  // Unfortunately, we can't just count up all occurrences of a given id,
1522  // that would give us duplicate entries when we do the parallel summation.
1523  // So instead, only count entries for nodes owned by this processor.
1524  // Start by getting rid of all non-local node entries from the vectors.
1525  std::vector<dof_id_type>::iterator it_node=boundary_node_list.begin();
1526  std::vector<boundary_id_type>::iterator it_id=boundary_node_boundary_id_list.begin();
1527 
1528  // New end iterators, to be updated as we find non-local IDs
1529  std::vector<dof_id_type>::iterator new_node_list_end = boundary_node_list.end();
1530  std::vector<boundary_id_type>::iterator new_boundary_id_list_end = boundary_node_boundary_id_list.end();
1531  for ( ; it_node != new_node_list_end; )
1532  {
1533  if (pmesh.node_ptr( *it_node )->processor_id() != this->processor_id())
1534  {
1535  // Back up the new end iterators to prepare for swap
1536  --new_node_list_end;
1537  --new_boundary_id_list_end;
1538 
1539  // Swap places, the non-local node will now be "past-the-end"
1540  std::swap (*it_node, *new_node_list_end);
1541  std::swap (*it_id, *new_boundary_id_list_end);
1542  }
1543  else // node is local, go to next
1544  {
1545  ++it_node;
1546  ++it_id;
1547  }
1548  }
1549 
1550  // Erase from "new" end to old end on each vector.
1551  boundary_node_list.erase(new_node_list_end, boundary_node_list.end());
1552  boundary_node_boundary_id_list.erase(new_boundary_id_list_end, boundary_node_boundary_id_list.end());
1553 
1554  // Now we can do the local count for each ID...
1555  for (unsigned i=0; i<global_nodeset_ids.size(); ++i)
1556  {
1557  this->num_global_node_counts[i] = std::count(boundary_node_boundary_id_list.begin(),
1558  boundary_node_boundary_id_list.end(),
1559  this->global_nodeset_ids[i]);
1560  }
1561 
1562  // And finally we can sum them up
1563  this->comm().sum(this->num_global_node_counts);
1564 
1565  if (verbose)
1566  {
1567  libMesh::out << "[" << this->processor_id() << "] num_global_node_counts = ";
1568  for (unsigned i=0; i<num_global_node_counts.size(); ++i)
1569  libMesh::out << num_global_node_counts[i] << ", ";
1570  libMesh::out << std::endl;
1571  }
1572 }
1573 
1574 
1575 
1576 
// NOTE(review): the function signature (original line 1577) was dropped in
// extraction. From the body this is presumably
// Nemesis_IO_Helper::compute_num_global_elem_blocks(const MeshBase& pmesh) --
// confirm against upstream source.
//
// Builds the global list of element-block (subdomain) IDs and the global
// per-block element counts: counts local active elements per subdomain,
// saves the local counts for later use (local_subdomain_counts), allgathers
// the subdomain IDs so every rank knows every block, then sums the per-block
// counts across processors into global_elem_blk_cnts and packs the IDs into
// global_elem_blk_ids for the Nemesis API.
1578 {
1579  // 1.) Loop over active local elements, build up set of subdomain IDs.
1580  std::set<subdomain_id_type> global_subdomain_ids;
1581 
1582  // This map keeps track of the number of elements in each subdomain over all processors
1583  std::map<subdomain_id_type, unsigned> global_subdomain_counts;
1584 
// NOTE(review): original lines 1585-1586 (presumably the declarations of the
// elem_it / elem_end active-local-element iterators used below) were dropped
// in extraction.
1587 
1588  for (; elem_it != elem_end; ++elem_it)
1589  {
1590  const Elem* elem = *elem_it;
1591 
1592  subdomain_id_type cur_subdomain = elem->subdomain_id();
1593 
1594 /*
1595  // We can't have a zero subdomain ID in Exodus (for some reason?)
1596  // so map zero subdomains to a max value...
1597  if (cur_subdomain == 0)
1598  cur_subdomain = std::numeric_limits<subdomain_id_type>::max();
1599 */
1600 
1601  global_subdomain_ids.insert(cur_subdomain);
1602 
1603  // Increment the count of elements in this subdomain
1604  global_subdomain_counts[cur_subdomain]++;
1605  }
1606 
1607  // We're next going to this->comm().sum the subdomain counts, so save the local counts
1608  this->local_subdomain_counts = global_subdomain_counts;
1609 
1610  {
1611  // 2.) Copy local subdomain IDs into a vector for communication
1612  std::vector<subdomain_id_type> global_subdomain_ids_vector(global_subdomain_ids.begin(),
1613  global_subdomain_ids.end());
1614 
1615  // 3.) Gather them into an enlarged vector
1616  this->comm().allgather(global_subdomain_ids_vector);
1617 
1618  // 4.) Insert any new IDs into the set (any duplicates will be dropped)
1619  global_subdomain_ids.insert(global_subdomain_ids_vector.begin(),
1620  global_subdomain_ids_vector.end());
1621  }
1622 
1623  // 5.) Now global_subdomain_ids actually contains a global list of all subdomain IDs
1624  this->num_elem_blks_global = global_subdomain_ids.size();
1625 
1626  // Print the number of elements found locally in each subdomain
1627  if (verbose)
1628  {
1629  libMesh::out << "[" << this->processor_id() << "] ";
1630  for (std::map<subdomain_id_type, unsigned>::iterator it=global_subdomain_counts.begin();
1631  it != global_subdomain_counts.end();
1632  ++it)
1633  {
1634  libMesh::out << "ID: "
1635  << static_cast<unsigned>((*it).first)
1636  << ", Count: " << (*it).second << ", ";
1637  }
1638  libMesh::out << std::endl;
1639  }
1640 
1641  // 6.) this->comm().sum up the number of elements in each block. We know the global
1642  // subdomain IDs, so pack them into a vector one by one. Use a vector of int since
1643  // that is what Nemesis wants
1644  this->global_elem_blk_cnts.resize(global_subdomain_ids.size());
1645 
1646  unsigned cnt=0;
1647  for (std::set<subdomain_id_type>::iterator it=global_subdomain_ids.begin();
1648  it != global_subdomain_ids.end(); ++it)
1649  {
1650  // Find the entry in the local map, note: if not found, will be created with 0 default value, which is OK...
1651  this->global_elem_blk_cnts[cnt++] = global_subdomain_counts[*it];
1652  }
1653 
1654  // Sum up subdomain counts from all processors
1655  this->comm().sum(this->global_elem_blk_cnts);
1656 
1657  if (verbose)
1658  {
1659  libMesh::out << "[" << this->processor_id() << "] global_elem_blk_cnts = ";
1660  for (unsigned i=0; i<this->global_elem_blk_cnts.size(); ++i)
1661  libMesh::out << this->global_elem_blk_cnts[i] << ", ";
1662  libMesh::out << std::endl;
1663  }
1664 
1665  // 7.) Create a vector<int> from the global_subdomain_ids set, for passing to Nemesis
1666  this->global_elem_blk_ids.clear();
1667  this->global_elem_blk_ids.insert(this->global_elem_blk_ids.end(), // pos
1668  global_subdomain_ids.begin(),
1669  global_subdomain_ids.end());
1670 
1671  if (verbose)
1672  {
1673  libMesh::out << "[" << this->processor_id() << "] global_elem_blk_ids = ";
1674  for (unsigned i=0; i<this->global_elem_blk_ids.size(); ++i)
1675  libMesh::out << this->global_elem_blk_ids[i] << ", ";
1676  libMesh::out << std::endl;
1677  }
1678 
1679 
1680  // 8.) We will call put_eb_info_global later, it must be called after this->put_init_global().
1681 }
1682 
1683 
1684 
1685 
// NOTE(review): the function signature (original line 1686) was dropped in
// extraction. From the body this is presumably
// Nemesis_IO_Helper::build_element_and_node_maps(const MeshBase& pmesh) --
// confirm against upstream source.
//
// Builds the per-processor Exodus numberings used by the writer:
//  - collects the nodes attached to local elements and assigns them a
//    contiguous 1-based Exodus numbering (exodus_node_num_to_libmesh /
//    libmesh_node_num_to_exodus),
//  - groups local elements by subdomain (block) since Exodus requires
//    block-contiguous element numbering (subdomain_map, block_ids,
//    exodus_elem_num_to_libmesh / libmesh_elem_num_to_exodus),
//  - fills block_id_to_elem_connectivity with each block's node connectivity
//    expressed in the local Exodus node numbering, with node order permuted
//    per-element via the ExodusII conversion object.
// Assumes compute_num_global_elem_blocks() already populated
// local_subdomain_counts.
1687 {
1688  // If we don't have any local subdomains, it had better be because
1689  // we don't have any local elements
1690 #ifdef DEBUG
1691  if (local_subdomain_counts.empty())
1692  {
// NOTE(review): original line 1693 (presumably the first half of an assertion
// comparing pmesh.active_local_elements_begin() against the end iterator) was
// dropped in extraction.
1694  pmesh.active_local_elements_end());
// NOTE(review): original line 1695 was also dropped in extraction.
1696  }
1697 #endif
1698 
1699  // Elements have to be numbered contiguously based on what block
1700  // number they are in. Therefore we have to do a bit of work to get
1701  // the block (ie subdomain) numbers first and store them off as
1702  // block_ids.
1703 
1704  // Make sure there is no leftover information in the subdomain_map, and reserve
1705  // enough space to store the elements we need.
1706  this->subdomain_map.clear();
1707  for (std::map<subdomain_id_type, unsigned>::iterator it=this->local_subdomain_counts.begin();
1708  it != this->local_subdomain_counts.end();
1709  ++it)
1710  {
1711  subdomain_id_type cur_subdomain = (*it).first;
1712 
1713 /*
1714  // We can't have a zero subdomain ID in Exodus (for some reason?)
1715  // so map zero subdomains to a max value...
1716  if (cur_subdomain == 0)
1717  cur_subdomain = std::numeric_limits<subdomain_id_type>::max();
1718 */
1719 
1720  if (verbose)
1721  {
1722  libMesh::out << "[" << this->processor_id() << "] "
1723  << "local_subdomain_counts [" << static_cast<unsigned>(cur_subdomain) << "]= "
1724  << (*it).second
1725  << std::endl;
1726  }
1727 
1728  // *it.first is the subodmain ID, *it.second is the number of elements it contains
1729  this->subdomain_map[ cur_subdomain ].reserve( (*it).second );
1730  }
1731 
1732 
1733  // First loop over the elements to figure out which elements are in which subdomain
// NOTE(review): original lines 1734-1735 (presumably the declarations of the
// elem_it / elem_end active-local-element iterators used below) were dropped
// in extraction.
1736 
1737  for (; elem_it != elem_end; ++elem_it)
1738  {
1739  const Elem * elem = *elem_it;
1740 
1741  // Grab the nodes while we're here.
1742  for (unsigned int n=0; n<elem->n_nodes(); ++n)
1743  this->nodes_attached_to_local_elems.insert( elem->node(n) );
1744 
1745  unsigned int cur_subdomain = elem->subdomain_id();
1746 
1747 /*
1748  // We can't have a zero subdomain ID in Exodus (for some reason?)
1749  // so map zero subdomains to a max value...
1750  if(cur_subdomain == 0)
1751  cur_subdomain = std::numeric_limits<subdomain_id_type>::max();
1752 */
1753 
1754  this->subdomain_map[cur_subdomain].push_back(elem->id());
1755  }
1756 
1757  // Set num_nodes which is used by exodusII_io_helper
1758  this->num_nodes = this->nodes_attached_to_local_elems.size();
1759 
1760  // Now come up with a 1-based numbering for these nodes
1761  this->exodus_node_num_to_libmesh.clear(); // Make sure it's empty
1762  this->exodus_node_num_to_libmesh.reserve(this->nodes_attached_to_local_elems.size());
1763 
1764  // Also make sure there's no leftover information in the map which goes the
1765  // other direction.
1766  this->libmesh_node_num_to_exodus.clear();
1767 
1768  // Set the map for nodes
1769  for(std::set<int>::iterator it = this->nodes_attached_to_local_elems.begin();
1770  it != this->nodes_attached_to_local_elems.end();
1771  ++it)
1772  {
1773  // I.e. given exodus_node_id,
1774  // exodus_node_num_to_libmesh[ exodus_node_id ] returns the libmesh ID for that node.
1775  // Note that even though most of Exodus is 1-based, this code will map an Exodus ID of
1776  // zero to some libmesh node ID. Is that a problem?
1777  this->exodus_node_num_to_libmesh.push_back(*it);
1778 
1779  // Likewise, given libmesh_node_id,
1780  // libmesh_node_num_to_exodus[ libmesh_node_id ] returns the *Exodus* ID for that node.
1781  // Unlike the exodus_node_num_to_libmesh vector above, this one is a std::map
1782  this->libmesh_node_num_to_exodus[*it] = this->exodus_node_num_to_libmesh.size(); // should never be zero...
1783  }
1784 
1785  // Now we're going to loop over the subdomain map and build a few things right
1786  // now that we'll use later.
1787 
1788  // First make sure our data structures don't have any leftover data...
1789  this->exodus_elem_num_to_libmesh.clear();
1790  this->block_ids.clear();
1791  this->libmesh_elem_num_to_exodus.clear();
1792 
1793  // Now loop over each subdomain and get a unique numbering for the elements
1794  for(std::map<subdomain_id_type, std::vector<unsigned int> >::iterator it = this->subdomain_map.begin();
1795  it != this->subdomain_map.end();
1796  ++it)
1797  {
1798  block_ids.push_back((*it).first);
1799 
1800  // Vector of element IDs for this subdomain
1801  std::vector<unsigned int>& elem_ids_this_subdomain = (*it).second;
1802 
1803  // The code below assumes this subdomain block is not empty, make sure that's the case!
1804  if (elem_ids_this_subdomain.size() == 0)
1805  {
1806  libMesh::err << "Error, no element IDs found in subdomain " << (*it).first << std::endl;
1807  libmesh_error();
1808  }
1809 
// NOTE(review): original line 1810 (presumably the declaration of the
// ExodusII_IO_Helper::ElementMaps object "em" used just below) was dropped in
// extraction.
1811 
1812  // Use the first element in this block to get representative information.
1813  // Note that Exodus assumes all elements in a block are of the same type!
1814  // We are using that same assumption here!
1815  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(pmesh.elem(elem_ids_this_subdomain[0])->type());
1816  this->num_nodes_per_elem = pmesh.elem(elem_ids_this_subdomain[0])->n_nodes();
1817 
1818  // Get a reference to the connectivity vector for this subdomain. This vector
1819  // is most likely empty, we are going to fill it up now.
1820  std::vector<int>& current_block_connectivity = this->block_id_to_elem_connectivity[(*it).first];
1821 
1822  // Just in case it's not already empty...
1823  current_block_connectivity.clear();
1824  current_block_connectivity.resize(elem_ids_this_subdomain.size() * this->num_nodes_per_elem);
1825 
1826  for (unsigned int i=0; i<elem_ids_this_subdomain.size(); i++)
1827  {
1828  unsigned int elem_id = elem_ids_this_subdomain[i];
1829 
1830  // Set the number map for elements
1831  this->exodus_elem_num_to_libmesh.push_back(elem_id);
1832  this->libmesh_elem_num_to_exodus[elem_id] = this->exodus_elem_num_to_libmesh.size();
1833 
1834  const Elem * elem = pmesh.elem(elem_id);
1835 
1836  // Exodus/Nemesis want every block to have the same element type
1837  // libmesh_assert_equal_to (elem->type(), conv.get_canonical_type());
1838 
1839  // But we can get away with writing e.g. HEX8 and INFHEX8 in
1840  // the same block...
1841  libmesh_assert_equal_to (elem->n_nodes(), Elem::build(conv.get_canonical_type(), NULL)->n_nodes());
1842 
1843  for (unsigned int j=0; j < static_cast<unsigned int>(this->num_nodes_per_elem); j++)
1844  {
1845  const unsigned int connect_index = (i*this->num_nodes_per_elem)+j;
1846  const unsigned int elem_node_index = conv.get_node_map(j);
1847  current_block_connectivity[connect_index] = this->libmesh_node_num_to_exodus[elem->node(elem_node_index)];
1848  }
1849  } // End loop over elems in this subdomain
1850  } // end loop over subdomain_map
1851 }
1852 
1853 
1854 
1855 
1856 
// Computes, for this processor, the set of "border node" IDs (nodes shared
// with at least one other processor) and stores the count in
// this->num_border_nodes.  It also fills this->num_node_cmaps and
// this->proc_nodes_touched_intersections along the way.
// NOTE(review): the signature line (original line 1857) was lost in this
// extraction — presumably Nemesis_IO_Helper::compute_border_node_ids(...);
// confirm against upstream libMesh before editing.
1858 {
1859  // The set which will eventually contain the IDs of "border nodes". These are nodes
1860  // that lie on the boundary between one or more processors.
1861  //std::set<unsigned> border_node_ids;
1862 
1863  // map from processor ID to set of nodes which elements from this processor "touch",
1864  // that is,
1865  // proc_nodes_touched[p] = (set all node IDs found in elements owned by processor p)
1866  std::map<unsigned, std::set<unsigned> > proc_nodes_touched;
1867 
1868 
1869  // We are going to create a lot of intermediate data structures here, so make sure
1870  // as many as possible all cleaned up by creating scope!
1871  {
1872  // Loop over active (not just active local) elements, make sets of node IDs for each
1873  // processor which has an element that "touches" a node.
1874  {
// NOTE(review): original lines 1875-1876 (the elem_it/elem_end iterator
// declarations for the active-element range) are missing from this dump.
1877 
1878  for (; elem_it != elem_end; ++elem_it)
1879  {
1880  const Elem* elem = *elem_it;
1881 
1882  // Get reference to the set for this processor. If it does not exist
1883  // it will be created.
1884  std::set<unsigned>& set_p = proc_nodes_touched[ elem->processor_id() ];
1885 
1886  // Insert all nodes touched by this element into the set
1887  for (unsigned int node=0; node<elem->n_nodes(); ++node)
1888  set_p.insert(elem->node(node));
1889  }
1890  }
1891 
1892  // The number of node communication maps is the number of other processors
1893  // with which we share nodes. (I think.) This is just the size of the map we just
1894  // created, minus 1.
1895  this->num_node_cmaps = proc_nodes_touched.size() - 1;
1896 
1897  // If we've got no elements on this processor and haven't touched
1898  // any nodes, however, then that's 0 other processors with which
1899  // we share nodes, not -1.
1900  if (this->num_node_cmaps == -1)
1901  {
// NOTE(review): original line 1902 is missing here (likely a comment or
// verbose-output statement); the clamp to zero below is the visible effect.
1903  this->num_node_cmaps = 0;
1904  }
1905 
1906  // We can't be connecting to more processors than exist outside
1907  // ourselves
1908  libmesh_assert_less (static_cast<unsigned>(this->num_node_cmaps), this->n_processors());
1909 
1910  if (verbose)
1911  {
1912  libMesh::out << "[" << this->processor_id()
1913  << "] proc_nodes_touched contains "
1914  << proc_nodes_touched.size()
1915  << " sets of nodes."
1916  << std::endl;
1917 
1918  for (proc_nodes_touched_iterator it = proc_nodes_touched.begin();
1919  it != proc_nodes_touched.end();
1920  ++it)
1921  {
1922  libMesh::out << "[" << this->processor_id()
1923  << "] proc_nodes_touched[" << (*it).first << "] has "
1924  << (*it).second.size()
1925  << " entries."
1926  << std::endl;
1927  }
1928  }
1929 
1930 
1931  // Loop over all the sets we just created and compute intersections with the
1932  // this processor's set. Obviously, don't intersect with ourself.
1933  for (proc_nodes_touched_iterator it = proc_nodes_touched.begin();
1934  it != proc_nodes_touched.end();
1935  ++it)
1936  {
1937  // Don't compute intersections with ourself
1938  if ((*it).first == this->processor_id())
1939  continue;
1940 
1941  // Otherwise, compute intersection with other processor and ourself
1942  std::set<unsigned>& my_set = proc_nodes_touched[this->processor_id()];
1943  std::set<unsigned>& other_set = (*it).second;
1944  std::set<unsigned>& result_set = this->proc_nodes_touched_intersections[ (*it).first ]; // created if does not exist
1945 
// set_intersection requires sorted ranges; std::set iteration is sorted,
// so the inserter at result_set.end() receives elements in order.
1946  std::set_intersection(my_set.begin(), my_set.end(),
1947  other_set.begin(), other_set.end(),
1948  std::inserter(result_set, result_set.end()));
1949  }
1950 
1951  if (verbose)
1952  {
// NOTE(review): original line 1953 (the for-loop header over
// proc_nodes_touched_intersections) is missing from this dump.
1954  it != this->proc_nodes_touched_intersections.end();
1955  ++it)
1956  {
1957  libMesh::out << "[" << this->processor_id()
1958  << "] this->proc_nodes_touched_intersections[" << (*it).first << "] has "
1959  << (*it).second.size()
1960  << " entries."
1961  << std::endl;
1962  }
1963  }
1964 
1965  // Compute the set_union of all the preceding intersections. This will be the set of
1966  // border node IDs for this processor.
// NOTE(review): original line 1967 (the for-loop header over
// proc_nodes_touched_intersections) is missing from this dump.
1968  it != this->proc_nodes_touched_intersections.end();
1969  ++it)
1970  {
1971  std::set<unsigned>& other_set = (*it).second;
1972  std::set<unsigned> intermediate_result; // Don't think we can insert into one of the sets we're unioning...
1973 
1974  std::set_union(this->border_node_ids.begin(), this->border_node_ids.end(),
1975  other_set.begin(), other_set.end(),
1976  std::inserter(intermediate_result, intermediate_result.end()));
1977 
1978  // Swap our intermediate result into the final set
1979  this->border_node_ids.swap(intermediate_result);
1980  }
1981 
1982  if (verbose)
1983  {
1984  libMesh::out << "[" << this->processor_id()
1985  << "] border_node_ids.size()=" << this->border_node_ids.size()
1986  << std::endl;
1987  }
1988  } // end scope for border node ID creation
1989 
1990  // Store the number of border node IDs to be written to Nemesis file
1991  this->num_border_nodes = this->border_node_ids.size();
1992 }
1993 
1994 
1995 
1996 
1997 
// Writes Nemesis nodesets: every *global* nodeset ID gets an entry in this
// processor's file, but only locally-mapped node IDs are written into it.
// NOTE(review): the signature line (original line 1998) was lost in this
// extraction — presumably Nemesis_IO_Helper::write_nodesets(const MeshBase& mesh)
// or similar; confirm against upstream libMesh before editing.
1999 {
2000  // Write the nodesets. In Nemesis, the idea is to "create space" for the global
2001  // set of boundary nodesets, but to only write node IDs which are local to the current
2002  // processor. This is what is done in Nemesis files created by the "loadbal" script.
2003 
2004  // Store a map of vectors for boundary node IDs on this processor.
2005  // Use a vector of int here so it can be passed directly to Exodus.
2006  std::map<boundary_id_type, std::vector<int> > local_node_boundary_id_lists;
2007  typedef std::map<boundary_id_type, std::vector<int> >::iterator local_node_boundary_id_lists_iterator;
2008 
2009  // FIXME: We should build this list only one time!! We already built it above, but we
2010  // did not have the libmesh to exodus node mapping at that time... for now we'll just
2011  // build it here again, hopefully it's small relative to the size of the entire mesh.
2012  std::vector<dof_id_type> boundary_node_list;
2013  std::vector<boundary_id_type> boundary_node_boundary_id_list;
2014  mesh.boundary_info->build_node_list(boundary_node_list, boundary_node_boundary_id_list);
2015 
2016  if (verbose)
2017  {
2018  libMesh::out << "[" << this->processor_id() << "] boundary_node_list.size()="
2019  << boundary_node_list.size() << std::endl;
2020  libMesh::out << "[" << this->processor_id() << "] (boundary_node_id, boundary_id) = ";
2021  for (unsigned i=0; i<boundary_node_list.size(); ++i)
2022  {
2023  libMesh::out << "(" << boundary_node_list[i] << ", " << boundary_node_boundary_id_list[i] << ") ";
2024  }
2025  libMesh::out << std::endl;
2026  }
2027 
2028  // For each node in the node list, add it to the vector of node IDs for that
2029  // set for the local processor. This will be used later when writing Exodus
2030  // nodesets.
2031  for (unsigned i=0; i<boundary_node_list.size(); ++i)
2032  {
2033  // Don't try to grab a reference to the vector unless the current node is attached
2034  // to a local element. Otherwise, another processor will be responsible for writing it in its nodeset.
2035  std::map<int, int>::iterator it = this->libmesh_node_num_to_exodus.find( boundary_node_list[i] );
2036 
2037  if ( it != this->libmesh_node_num_to_exodus.end() )
2038  {
2039  // Get reference to the vector where this node ID will be inserted. If it
2040  // doesn't yet exist, this will create it.
2041  std::vector<int>& current_id_set = local_node_boundary_id_lists[ boundary_node_boundary_id_list[i] ];
2042 
2043  // Push back Exodus-mapped node ID for this set
2044  // TODO: reserve space in these vectors somehow.
2045  current_id_set.push_back( (*it).second );
2046  }
2047  }
2048 
2049  // See what we got
2050  if (verbose)
2051  {
2052  for(std::map<boundary_id_type, std::vector<int> >::iterator it = local_node_boundary_id_lists.begin();
2053  it != local_node_boundary_id_lists.end();
2054  ++it)
2055  {
2056  libMesh::out << "[" << this->processor_id() << "] ID: " << (*it).first << ", ";
2057 
2058  std::vector<int>& current_id_set = (*it).second;
2059 
2060  // Libmesh node ID (Exodus Node ID)
2061  for (unsigned j=0; j<current_id_set.size(); ++j)
2062  libMesh::out << current_id_set[j]
2063  << ", ";
2064 
2065  libMesh::out << std::endl;
2066  }
2067  }
2068 
2069  // Loop over *global* nodeset IDs, call the Exodus API. Note that some nodesets may be empty
2070  // for a given processor.
2071  for (unsigned i=0; i<this->global_nodeset_ids.size(); ++i)
2072  {
2073  if (verbose)
2074  {
2075  libMesh::out << "[" << this->processor_id()
2076  << "] Writing out Exodus nodeset info for ID: " << global_nodeset_ids[i] << std::endl;
2077  }
2078 
2079  // Convert current global_nodeset_id into an exodus ID, which can't be zero...
2080  int exodus_id = global_nodeset_ids[i];
2081 
2082 /*
2083  // Exodus can't handle zero nodeset IDs (?) Use max short here since
2084  // when libmesh reads it back in, it will want to store it as a short...
2085  if (exodus_id==0)
2086  exodus_id = std::numeric_limits<short>::max();
2087 */
2088 
2089  // Try to find this boundary ID in the local list we created
2090  local_node_boundary_id_lists_iterator it =
2091  local_node_boundary_id_lists.find(this->global_nodeset_ids[i]);
2092 
2093  // No nodes found for this boundary ID on this processor
2094  if (it == local_node_boundary_id_lists.end())
2095  {
2096  if (verbose)
2097  libMesh::out << "[" << this->processor_id()
2098  << "] No nodeset data for ID: " << global_nodeset_ids[i]
2099  << " on this processor." << std::endl;
2100 
2101  // Call the Exodus interface to write the parameters of this node set
// NOTE(review): original line 2102 (the "this->ex_err =
// exII::ex_put_node_set_param(this->ex_id," call line) is missing
// from this dump; the argument lines below belong to it.
2103  exodus_id,
2104  0, /* No nodes for this ID */
2105  0 /* No distribution factors */);
2106  EX_CHECK_ERR(this->ex_err, "Error writing nodeset parameters in Nemesis");
2107 
2108  }
2109  else // Boundary ID *was* found in list
2110  {
2111  // Get reference to the vector of node IDs
2112  std::vector<int>& current_nodeset_ids = (*it).second;
2113 
2114  // Call the Exodus interface to write the parameters of this node set
// NOTE(review): original line 2115 (the ex_put_node_set_param call
// line) is missing from this dump; the argument lines below belong to it.
2116  exodus_id,
2117  current_nodeset_ids.size(),
2118  0 /* No distribution factors */);
2119 
2120  EX_CHECK_ERR(this->ex_err, "Error writing nodeset parameters in Nemesis");
2121 
2122  // Call Exodus interface to write the actual node IDs for this boundary ID
2123  this->ex_err = exII::ex_put_node_set(this->ex_id,
2124  exodus_id,
2125  &current_nodeset_ids[0]);
2126 
2127  EX_CHECK_ERR(this->ex_err, "Error writing nodesets in Nemesis");
2128 
2129  }
2130  } // end loop over global nodeset IDs
2131 }
2132 
2133 
2134 
2135 
// Writes Nemesis sidesets: every *global* sideset ID gets an entry in this
// processor's file, but only locally-owned (and, under AMR, active-child)
// element/side pairs are written into it.
// NOTE(review): the signature line (original line 2136) was lost in this
// extraction — presumably Nemesis_IO_Helper::write_sidesets(const MeshBase& mesh)
// or similar; confirm against upstream libMesh before editing.
2137 {
2138  // Write the sidesets. In Nemesis, the idea is to "create space" for the global
2139  // set of boundary sidesets, but to only write sideset IDs which are local to the current
2140  // processor. This is what is done in Nemesis files created by the "loadbal" script.
2141  // See also: ExodusII_IO_Helper::write_sidesets()...
2142 
2143 
2144  // Store a map of vectors for boundary side IDs on this processor.
2145  // Use a vector of int here so it can be passed directly to Exodus.
2146  std::map<boundary_id_type, std::vector<int> > local_elem_boundary_id_lists;
2147  std::map<boundary_id_type, std::vector<int> > local_elem_boundary_id_side_lists;
2148  typedef std::map<boundary_id_type, std::vector<int> >::iterator local_elem_boundary_id_lists_iterator;
2149 
// NOTE(review): original line 2150 is missing from this dump (likely a
// comment or a declaration used by the side-list build below).
2151 
2152  // FIXME: We already built this list once, we should reuse that information!
2153  std::vector< dof_id_type > bndry_elem_list;
2154  std::vector< unsigned short int > bndry_side_list;
2155  std::vector< boundary_id_type > bndry_id_list;
2156 
2157  mesh.boundary_info->build_side_list(bndry_elem_list, bndry_side_list, bndry_id_list);
2158 
2159  // Integer looping, skipping non-local elements
2160  for (unsigned i=0; i<bndry_elem_list.size(); ++i)
2161  {
2162  // Get pointer to current Elem
2163  const Elem* elem = mesh.elem(bndry_elem_list[i]);
2164 
2165  // If element is local, process it
2166  if (elem->processor_id() == this->processor_id())
2167  {
2168  std::vector<const Elem*> family;
2169 #ifdef LIBMESH_ENABLE_AMR
2170  // We need to build up active elements if AMR is enabled and add
2171  // them to the exodus sidesets instead of the potentially inactive "parent" elements
2172  // Technically we don't need to "reset" the tree since the vector was just created.
2173  elem->active_family_tree_by_side(family, bndry_side_list[i], /*reset tree=*/false);
2174 #else
2175  // If AMR is not even enabled, just push back the element itself
2176  family.push_back( elem );
2177 #endif
2178 
2179  // Loop over all the elements in the family tree, store their converted IDs
2180  // and side IDs to the map's vectors. TODO: Somehow reserve enough space for these
2181  // push_back's...
2182  for(unsigned int j=0; j<family.size(); ++j)
2183  {
2184  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(mesh.elem(family[j]->id())->type());
2185 
2186  // Use the libmesh to exodus datastructure map to get the proper sideset IDs
2187  // The datastructure contains the "collapsed" contiguous ids.
2188  //
2189  // We know the parent element is local, but let's be absolutely sure that all the children have been
2190  // actually mapped to Exodus IDs before we blindly try to add them...
2191  std::map<int,int>::iterator it = this->libmesh_elem_num_to_exodus.find( family[j]->id() );
2192  if (it != this->libmesh_elem_num_to_exodus.end())
2193  {
2194  local_elem_boundary_id_lists[ bndry_id_list[i] ].push_back( (*it).second );
2195  local_elem_boundary_id_side_lists[ bndry_id_list[i] ].push_back(conv.get_inverse_side_map( bndry_side_list[i] ));
2196  }
2197  else
2198  {
2199  libMesh::err << "Error, no Exodus mapping for Elem "
2200  << family[j]->id()
2201  << " on processor "
2202  << this->processor_id()
2203  << std::endl;
2204  libmesh_error();
2205  }
2206  }
2207  }
2208  }
2209 
2210 
2211  // Loop over *global* sideset IDs, call the Exodus API. Note that some sidesets may be empty
2212  // for a given processor.
2213  for (unsigned i=0; i<this->global_sideset_ids.size(); ++i)
2214  {
2215  if (verbose)
2216  {
2217  libMesh::out << "[" << this->processor_id()
2218  << "] Writing out Exodus sideset info for ID: " << global_sideset_ids[i] << std::endl;
2219  }
2220 
2221  // Convert current global_sideset_id into an exodus ID, which can't be zero...
2222  int exodus_id = global_sideset_ids[i];
2223 
2224 /*
2225  // Exodus can't handle zero sideset IDs (?) Use max short here since
2226  // when libmesh reads it back in, it will want to store it as a short...
2227  if (exodus_id==0)
2228  exodus_id = std::numeric_limits<short>::max();
2229 */
2230 
2231  // Try to find this boundary ID in the local list we created
2232  local_elem_boundary_id_lists_iterator it =
2233  local_elem_boundary_id_lists.find(this->global_sideset_ids[i]);
2234 
2235  // No sides found for this boundary ID on this processor
2236  if (it == local_elem_boundary_id_lists.end())
2237  {
2238  if (verbose)
2239  libMesh::out << "[" << this->processor_id()
2240  << "] No sideset data for ID: " << global_sideset_ids[i]
2241  << " on this processor." << std::endl;
2242 
2243  // Call the Exodus interface to write the parameters of this side set
// NOTE(review): original line 2244 (the "this->ex_err =
// exII::ex_put_side_set_param(this->ex_id," call line) is missing
// from this dump; the argument lines below belong to it.
2245  exodus_id,
2246  0, /* No sides for this ID */
2247  0 /* No distribution factors */);
2248  EX_CHECK_ERR(this->ex_err, "Error writing sideset parameters in Nemesis");
2249 
2250  }
2251  else // Boundary ID *was* found in list
2252  {
2253  // Get iterator to sides vector as well
2254  local_elem_boundary_id_lists_iterator it_sides =
2255  local_elem_boundary_id_side_lists.find(this->global_sideset_ids[i]);
2256 
2257  libmesh_assert (it_sides != local_elem_boundary_id_side_lists.end());
2258 
2259  // Get reference to the vector of elem IDs
2260  std::vector<int>& current_sideset_elem_ids = (*it).second;
2261 
2262  // Get reference to the vector of side IDs
2263  std::vector<int>& current_sideset_side_ids = (*it_sides).second;
2264 
2265  // Call the Exodus interface to write the parameters of this side set
// NOTE(review): original line 2266 (the ex_put_side_set_param call
// line) is missing from this dump; the argument lines below belong to it.
2267  exodus_id,
2268  current_sideset_elem_ids.size(),
2269  0 /* No distribution factors */);
2270 
2271  EX_CHECK_ERR(this->ex_err, "Error writing sideset parameters in Nemesis");
2272 
2273  // Call Exodus interface to write the actual side IDs for this boundary ID
2274  this->ex_err = exII::ex_put_side_set(this->ex_id,
2275  exodus_id,
2276  &current_sideset_elem_ids[0],
2277  &current_sideset_side_ids[0]);
2278 
2279  EX_CHECK_ERR(this->ex_err, "Error writing sidesets in Nemesis");
2280  }
2281  } // end for loop over global sideset IDs
2282 }
2283 
2284 
2285 
// Writes this processor's nodal (x, y, z) coordinates to the Nemesis file,
// in the order established by exodus_node_num_to_libmesh, followed by the
// node number map.  Handles the zero-local-nodes case separately.
// NOTE(review): the signature line (original line 2286) was lost in this
// extraction — presumably Nemesis_IO_Helper::write_nodal_coordinates(const
// MeshBase& mesh); confirm against upstream libMesh before editing.
2287 {
2288  // Make sure that the reference passed in is really a ParallelMesh
2289  // const ParallelMesh& pmesh = libmesh_cast_ref<const ParallelMesh&>(mesh);
2290 
2291  unsigned local_num_nodes = this->exodus_node_num_to_libmesh.size();
2292 
2293  x.resize(local_num_nodes);
2294  y.resize(local_num_nodes);
2295  z.resize(local_num_nodes);
2296 
2297  // Just loop over our list outputing the nodes the way we built the map
2298  for (unsigned int i=0; i<local_num_nodes; ++i)
2299  {
2300  const Node & node = *mesh.node_ptr(this->exodus_node_num_to_libmesh[i]);
2301  x[i]=node(0);
2302  y[i]=node(1);
2303  z[i]=node(2);
2304  }
2305 
2306  if (local_num_nodes)
2307  {
2308  // Call Exodus API to write nodal coordinates...
2309  ex_err = exII::ex_put_coord(ex_id, &x[0], &y[0], &z[0]);
2310  EX_CHECK_ERR(ex_err, "Error writing node coordinates");
2311 
2312  // And write the nodal map we created for them
// NOTE(review): original line 2313 (the exII::ex_put_node_num_map call,
// presumably) is missing from this dump; the check below refers to it.
2314  EX_CHECK_ERR(ex_err, "Error writing node num map");
2315  }
2316  else // Does the Exodus API want us to write empty nodal coordinates?
2317  {
2318  ex_err = exII::ex_put_coord(ex_id, NULL, NULL, NULL);
2319  EX_CHECK_ERR(ex_err, "Error writing empty node coordinates");
2320 
// NOTE(review): original line 2321 (the empty node-num-map write call)
// is missing from this dump; the check below refers to it.
2322  EX_CHECK_ERR(ex_err, "Error writing empty node num map");
2323  }
2324 }
2325 
2326 
2327 
2328 
2329 
// Writes element-block metadata and connectivity for every *global* element
// block ID: empty blocks (no local elements) are written with zero counts so
// the per-processor files agree on the global block structure.
// NOTE(review): the signature line (original line 2330) was lost in this
// extraction — presumably Nemesis_IO_Helper::write_elements(const MeshBase&
// mesh); confirm against upstream libMesh before editing.
2331 {
2332 
2333  // Loop over all blocks, even if we don't have elements in each block.
2334  // If we don't have elements we need to write out a 0 for that block...
2335  for (unsigned int i=0; i<static_cast<unsigned>(this->num_elem_blks_global); ++i)
2336  {
2337  // Search for the current global block ID in the map
2338  std::map<int, std::vector<int> >::iterator it =
2339  this->block_id_to_elem_connectivity.find( this->global_elem_blk_ids[i] );
2340 
2341  // If not found, write a zero to file....
2342  if (it == this->block_id_to_elem_connectivity.end())
2343  {
2344  this->ex_err = exII::ex_put_elem_block(this->ex_id,
2345  this->global_elem_blk_ids[i],
2346  "Empty",
2347  0, /* n. elements in this block */
2348  0, /* n. nodes per element */
2349  0); /* number of attributes per element */
2350 
2351  EX_CHECK_ERR(this->ex_err, "Error writing element block from Nemesis.");
2352  }
2353 
2354  // Otherwise, write the actual block information and connectivity to file
2355  else
2356  {
2357  int block = (*it).first;
2358  std::vector<int> & this_block_connectivity = (*it).second;
2359  std::vector<unsigned int> & elements_in_this_block = subdomain_map[block];
2360 
// NOTE(review): original line 2361 is missing from this dump (likely a
// declaration or comment preceding the conversion lookup below).
2362 
2363  //Use the first element in this block to get representative information.
2364  //Note that Exodus assumes all elements in a block are of the same type!
2365  //We are using that same assumption here!
2366  const ExodusII_IO_Helper::Conversion conv =
2367  em.assign_conversion(mesh.elem(elements_in_this_block[0])->type());
2368 
2369  this->num_nodes_per_elem = mesh.elem(elements_in_this_block[0])->n_nodes();
2370 
// NOTE(review): original line 2371 (the "ex_err =
// exII::ex_put_elem_block(ex_id," call line) is missing from this dump;
// the argument lines below belong to it.
2372  block,
2373  conv.exodus_elem_type().c_str(),
2374  elements_in_this_block.size(),
// NOTE(review): original line 2375 (presumably the num_nodes_per_elem
// argument) is missing from this dump.
2376  0);
2377  EX_CHECK_ERR(ex_err, "Error writing element block from Nemesis.");
2378 
// NOTE(review): original line 2379 (the "ex_err =
// exII::ex_put_elem_conn(ex_id," call line) is missing from this dump;
// the argument lines below belong to it.
2380  block,
2381  &this_block_connectivity[0]);
2382  EX_CHECK_ERR(ex_err, "Error writing element connectivities from Nemesis.");
2383  }
2384  } // end loop over global block IDs
2385 
2386  // Only call this once, not in the loop above!
// NOTE(review): original lines 2387-2388 (the element-number-map write
// call, presumably exII::ex_put_elem_num_map) are missing from this dump;
// the check below refers to them.
2389  EX_CHECK_ERR(ex_err, "Error writing element map");
2390 }
2391 
2392 
2393 
2394 
2395 
2396 void Nemesis_IO_Helper::write_nodal_solution(const std::vector<Number> & values, const std::vector<std::string> names, int timestep)
2397 {
2398  int num_vars = names.size();
2399  //int num_values = values.size(); // Not used?
2400 
2401  for (int c=0; c<num_vars; c++)
2402  {
2403  std::vector<Number> cur_soln(num_nodes);
2404 
2405  //Copy out this variable's solution
2406  for(int i=0; i<num_nodes; i++)
2407  cur_soln[i] = values[this->exodus_node_num_to_libmesh[i]*num_vars + c];
2408 
2409  write_nodal_values(c+1,cur_soln,timestep);
2410  }
2411 }
2412 
2413 
2414 
2415 
2416 std::string Nemesis_IO_Helper::construct_nemesis_filename(const std::string& base_filename)
2417 {
2418  // Build a filename for this processor. This code is cut-n-pasted from the read function
2419  // and should probably be put into a separate function...
2420  std::ostringstream file_oss;
2421 
2422  // We have to be a little careful here: Nemesis left pads its file
2423  // numbers based on the number of processors, so for example on 10
2424  // processors, we'd have:
2425  // mesh.e.10.00
2426  // mesh.e.10.01
2427  // mesh.e.10.02
2428  // ...
2429  // mesh.e.10.09
2430 
2431  // And on 100 you'd have:
2432  // mesh.e.100.000
2433  // mesh.e.100.001
2434  // ...
2435  // mesh.e.128.099
2436 
2437  // Find the length of the highest processor ID
2438  file_oss << (this->n_processors());
2439  unsigned field_width = file_oss.str().size();
2440 
2441  if (verbose)
2442  libMesh::out << "field_width=" << field_width << std::endl;
2443 
2444  file_oss.str(""); // reset the string stream
2445  file_oss << base_filename
2446  << '.' << this->n_processors()
2447  << '.' << std::setfill('0') << std::setw(field_width) << this->processor_id();
2448 
2449  // Return the resulting string
2450  return file_oss.str();
2451 }
2452 
2453 } // namespace libMesh
2454 
2455 #endif // #if defined(LIBMESH_HAVE_NEMESIS_API) && defined(LIBMESH_HAVE_EXODUS_API)

Site Created By: libMesh Developers
Last modified: February 07 2014 16:57:06 UTC

Hosted By:
SourceForge.net Logo