dof_map.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2014 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // C++ Includes -------------------------------------
21 #include <set>
22 #include <algorithm> // for std::fill, std::equal_range, std::max, std::lower_bound, etc.
23 #include <sstream>
24 
25 // Local Includes -----------------------------------
27 #include "libmesh/dense_matrix.h"
30 #include "libmesh/dof_map.h"
31 #include "libmesh/elem.h"
32 #include "libmesh/fe_interface.h"
33 #include "libmesh/fe_type.h"
34 #include "libmesh/fe_base.h" // FEBase::build() for continuity test
36 #include "libmesh/mesh_base.h"
37 #include "libmesh/mesh_tools.h"
38 #include "libmesh/numeric_vector.h"
39 #include "libmesh/parallel.h"
41 #include "libmesh/sparse_matrix.h"
43 #include "libmesh/string_to_enum.h"
44 #include "libmesh/threads.h"
45 
46 
47 
48 namespace libMesh
49 {
50 
51 // ------------------------------------------------------------
52 // DofMap member functions
// NOTE(review): this listing is a doxygen text dump; the signature line
// (presumably "AutoPtr<SparsityPattern::Build> DofMap::build_sparsity",
// doxygen line 53) was lost in extraction -- confirm against the real
// dof_map.C.  Builds the matrix sparsity pattern (nonzeros per row) for
// this DofMap over the given, already-prepared mesh.
54  (const MeshBase& mesh) const
55 {
56  libmesh_assert (mesh.is_prepared());
57  libmesh_assert (this->n_variables());
58 
59  START_LOG("build_sparsity()", "DofMap");
60 
61  // Compute the sparsity structure of the global matrix. This can be
62  // fed into a PetscMatrix to allocate exacly the number of nonzeros
63  // necessary to store the matrix. This algorithm should be linear
64  // in the (# of elements)*(# nodes per element)
65 
66  // We can be more efficient in the threaded sparsity pattern assembly
67  // if we don't need the exact pattern. For some sparse matrix formats
68  // a good upper bound will suffice.
69 
70  // See if we need to include sparsity pattern entries for coupling
71  // between neighbor dofs
72  bool implicit_neighbor_dofs = this->use_coupled_neighbor_dofs(mesh);
73 
74  // We can compute the sparsity pattern in parallel on multiple
75  // threads. The goal is for each thread to compute the full sparsity
76  // pattern for a subset of elements. These sparsity patterns can
77  // be efficiently merged in the SparsityPattern::Build::join()
78  // method, especially if there is not too much overlap between them.
79  // Even better, if the full sparsity pattern is not needed then
80  // the number of nonzeros per row can be estimated from the
81  // sparsity patterns created on each thread.
// NOTE(review): doxygen line 82 (the declaration of the smart pointer
// "sp" being initialized here) was elided in extraction -- verify.
83  (new SparsityPattern::Build (mesh,
84  *this,
85  this->_dof_coupling,
86  implicit_neighbor_dofs,
87  need_full_sparsity_pattern));
88 
// NOTE(review): doxygen line 89 (presumably a Threads::parallel_reduce
// call over the active local element range) was elided -- verify.
90  mesh.active_local_elements_end()), *sp);
91 
92  sp->parallel_sync();
93 
94 #ifndef NDEBUG
95  // Avoid declaring these variables unless asserts are enabled.
96  const processor_id_type proc_id = mesh.processor_id();
97  const dof_id_type n_dofs_on_proc = this->n_dofs_on_processor(proc_id);
98 #endif
99  libmesh_assert_equal_to (sp->sparsity_pattern.size(), n_dofs_on_proc);
100 
101  STOP_LOG("build_sparsity()", "DofMap");
102 
103  // Check to see if we have any extra stuff to add to the sparsity_pattern
104  if (_extra_sparsity_function)
105  {
106  if (_augment_sparsity_pattern)
107  {
108  libmesh_here();
109  libMesh::out << "WARNING: You have specified both an extra sparsity function and object.\n"
110  << " Are you sure this is what you meant to do??"
111  << std::endl;
112  }
113 
114  _extra_sparsity_function
115  (sp->sparsity_pattern, sp->n_nz,
116  sp->n_oz, _extra_sparsity_context);
117  }
118 
119  if (_augment_sparsity_pattern)
120  _augment_sparsity_pattern->augment_sparsity_pattern
121  (sp->sparsity_pattern, sp->n_nz, sp->n_oz);
122 
123  return sp;
124 }
125 
126 
127 
128 DofMap::DofMap(const unsigned int number,
129  const ParallelObject &parent_decomp) :
130  ParallelObject (parent_decomp),
131  _dof_coupling(NULL),
132  _variables(),
133  _variable_groups(),
134  _sys_number(number),
135  _matrices(),
136  _first_df(),
137  _end_df(),
138  _send_list(),
139  _augment_sparsity_pattern(NULL),
140  _extra_sparsity_function(NULL),
141  _extra_sparsity_context(NULL),
142  _augment_send_list(NULL),
143  _extra_send_list_function(NULL),
144  _extra_send_list_context(NULL),
145  need_full_sparsity_pattern(false),
146  _n_nz(NULL),
147  _n_oz(NULL),
148  _n_dfs(0),
149  _n_SCALAR_dofs(0)
150 #ifdef LIBMESH_ENABLE_AMR
151  , _n_old_dfs(0),
152  _first_old_df(),
153  _end_old_df()
154 #endif
155 #ifdef LIBMESH_ENABLE_CONSTRAINTS
156  , _dof_constraints()
157  , _primal_constraint_values()
158  , _adjoint_constraint_values()
159 #endif
160 #ifdef LIBMESH_ENABLE_NODE_CONSTRAINTS
161  , _node_constraints()
162 #endif
163 #ifdef LIBMESH_ENABLE_PERIODIC
164  , _periodic_boundaries(new PeriodicBoundaries)
165 #endif
166 #ifdef LIBMESH_ENABLE_DIRICHLET
167  , _dirichlet_boundaries(new DirichletBoundaries)
168  , _adjoint_dirichlet_boundaries()
169 #endif
170 {
171  _matrices.clear();
172 }
173 
174 
175 
176 // Destructor
// NOTE(review): the signature line (presumably "DofMap::~DofMap()",
// doxygen line 177) was elided in this extraction -- verify against
// the original dof_map.C.  Releases the heap-allocated boundary
// containers owned by the constructor.
178 {
179  this->clear();
180 #ifdef LIBMESH_ENABLE_PERIODIC
181  delete _periodic_boundaries;
182 #endif
183 #ifdef LIBMESH_ENABLE_DIRICHLET
184  delete _dirichlet_boundaries;
185  for (unsigned int q = 0; q != _adjoint_dirichlet_boundaries.size(); ++q)
// NOTE(review): the loop body (doxygen line 186, presumably deleting
// _adjoint_dirichlet_boundaries[q]) was elided -- verify.
187 #endif
188 }
189 
190 
191 #ifdef LIBMESH_ENABLE_PERIODIC
192 
193 bool DofMap::is_periodic_boundary (const boundary_id_type boundaryid) const
194 {
195  if (_periodic_boundaries->count(boundaryid) != 0)
196  return true;
197 
198  return false;
199 }
200 
201 #endif
202 
203 
204 
205 // void DofMap::add_variable (const Variable &var)
206 // {
207 // libmesh_error();
208 // _variables.push_back (var);
209 // }
210 
211 
212 
// NOTE(review): the signature line (presumably
// "void DofMap::add_variable_group (const VariableGroup &var_group)",
// doxygen line 213) was elided in this extraction -- verify.
// Appends a variable group to the DofMap and registers each of its
// member variables individually in _variables.
214 {
215  _variable_groups.push_back(var_group);
216 
217  VariableGroup &new_var_group = _variable_groups.back();
218 
// Expand the group into per-variable entries so that variable-indexed
// queries keep working alongside group-indexed ones.
219  for (unsigned int var=0; var<new_var_group.n_variables(); var++)
220  _variables.push_back (new_var_group(var));
221 }
222 
223 
224 
// NOTE(review): the signature line (presumably
// "void DofMap::attach_matrix (SparseMatrix<Number> &matrix)", doxygen
// line 225) was elided in this extraction, as were doxygen lines
// 245, 248-252 and 256 below -- verify against the original file.
// Registers a matrix with this DofMap so sparsity information can be
// supplied to it.
226 {
227  parallel_object_only();
228 
229  // We shouldn't be trying to re-attach the same matrices repeatedly
230  libmesh_assert (std::find(_matrices.begin(), _matrices.end(),
231  &matrix) == _matrices.end());
232 
233  _matrices.push_back(&matrix);
234 
235  matrix.attach_dof_map (*this);
236 
237  // If we've already computed sparsity, then it's too late
238  // to wait for "compute_sparsity" to help with sparse matrix
239  // initialization, and we need to handle this matrix individually
240  bool computed_sparsity_already =
241  ((_n_nz && !_n_nz->empty()) ||
242  (_n_oz && !_n_oz->empty()));
243  this->comm().max(computed_sparsity_already);
244  if (computed_sparsity_already &&
// NOTE(review): the rest of this condition and the statements inside
// the braces (doxygen lines 245 and 248-252) were elided -- verify.
246  {
247  // We'd better have already computed the full sparsity pattern
248  // if we need it here
251 
253  }
254 
255  if (matrix.need_full_sparsity_pattern())
// NOTE(review): the guarded statement (doxygen line 256, presumably
// setting need_full_sparsity_pattern = true) was elided -- verify.
257 }
258 
259 
260 
// NOTE(review): the signature line (presumably
// "bool DofMap::is_attached (SparseMatrix<Number> &matrix)", doxygen
// line 261) was elided in this extraction -- verify.
// Returns true if the given matrix has been registered via
// attach_matrix().
262 {
263  return (std::find(_matrices.begin(), _matrices.end(),
264  &matrix) != _matrices.end());
265 }
266 
267 
268 
// NOTE(review): the signature line (presumably
// "DofObject* DofMap::node_ptr(MeshBase& mesh, dof_id_type i) const",
// doxygen line 269) was elided in this extraction -- verify.
// Accessor used with set_nonlocal_dof_objects(): fetches node i from
// the mesh as a DofObject.
270 {
271  return mesh.node_ptr(i);
272 }
273 
274 
275 
// NOTE(review): the signature line (presumably
// "DofObject* DofMap::elem_ptr(MeshBase& mesh, dof_id_type i) const",
// doxygen line 276) was elided in this extraction -- verify.
// Accessor used with set_nonlocal_dof_objects(): fetches element i
// from the mesh as a DofObject.
277 {
278  return mesh.elem(i);
279 }
280 
281 
282 
// Synchronizes DOF data (component counts and first-DOF indices per
// variable group) for non-local DofObjects by exchanging requests with
// the owning processors.  "objects" is a member-function accessor
// (node_ptr/elem_ptr) used to look objects up by id on each processor.
// NOTE(review): this listing is a doxygen text dump; doxygen line 334
// (inside the request-building loop below) was elided -- it presumably
// guarded against requesting locally-owned objects.  Verify against
// the original dof_map.C.
283 template <typename iterator_type>
284 void DofMap::set_nonlocal_dof_objects(iterator_type objects_begin,
285  iterator_type objects_end,
286  MeshBase &mesh,
287  dofobject_accessor objects)
288 {
289  // This function must be run on all processors at once
290  parallel_object_only();
291 
292  // First, iterate over local objects to find out how many
293  // are on each processor
294  std::vector<dof_id_type>
295  ghost_objects_from_proc(this->n_processors(), 0);
296 
297  iterator_type it = objects_begin;
298 
299  for (; it != objects_end; ++it)
300  {
301  DofObject *obj = *it;
302 
303  if (obj)
304  {
305  processor_id_type obj_procid = obj->processor_id();
306  // We'd better be completely partitioned by now
307  libmesh_assert_not_equal_to (obj_procid, DofObject::invalid_processor_id);
308  ghost_objects_from_proc[obj_procid]++;
309  }
310  }
311 
312  std::vector<dof_id_type> objects_on_proc(this->n_processors(), 0);
313  this->comm().allgather(ghost_objects_from_proc[this->processor_id()],
314  objects_on_proc);
315 
316 #ifdef DEBUG
317  for (processor_id_type p=0; p != this->n_processors(); ++p)
318  libmesh_assert_less_equal (ghost_objects_from_proc[p], objects_on_proc[p]);
319 #endif
320 
321  // Request sets to send to each processor
322  std::vector<std::vector<dof_id_type> >
323  requested_ids(this->n_processors());
324 
325  // We know how many of our objects live on each processor, so
326  // reserve() space for requests from each.
327  for (processor_id_type p=0; p != this->n_processors(); ++p)
328  if (p != this->processor_id())
329  requested_ids[p].reserve(ghost_objects_from_proc[p]);
330 
331  for (it = objects_begin; it != objects_end; ++it)
332  {
333  DofObject *obj = *it;
// NOTE(review): doxygen line 334 (a guard condition on obj, likely
// checking it is non-NULL and/or not locally owned) was elided here.
335  requested_ids[obj->processor_id()].push_back(obj->id());
336  }
337 #ifdef DEBUG
338  for (processor_id_type p=0; p != this->n_processors(); ++p)
339  libmesh_assert_equal_to (requested_ids[p].size(), ghost_objects_from_proc[p]);
340 #endif
341 
342  // Next set ghost object n_comps from other processors
343  for (processor_id_type p=1; p != this->n_processors(); ++p)
344  {
345  // Trade my requests with processor procup and procdown
346  processor_id_type procup = (this->processor_id() + p) %
347  this->n_processors();
348  processor_id_type procdown = (this->n_processors() +
349  this->processor_id() - p) %
350  this->n_processors();
351  std::vector<dof_id_type> request_to_fill;
352  this->comm().send_receive(procup, requested_ids[procup],
353  procdown, request_to_fill);
354 
355  // Fill those requests
356  const unsigned int
357  sys_num = this->sys_number(),
358  n_var_groups = this->n_variable_groups();
359 
// Each requested object contributes 2*n_var_groups entries: the
// per-group component count followed by the per-group first DOF index.
360  std::vector<dof_id_type> ghost_data
361  (request_to_fill.size() * 2 * n_var_groups);
362 
363  for (std::size_t i=0; i != request_to_fill.size(); ++i)
364  {
365  DofObject *requested = (this->*objects)(mesh, request_to_fill[i]);
366  libmesh_assert(requested);
367  libmesh_assert_equal_to (requested->processor_id(), this->processor_id());
368  libmesh_assert_equal_to (requested->n_var_groups(sys_num), n_var_groups);
369  for (unsigned int vg=0; vg != n_var_groups; ++vg)
370  {
371  unsigned int n_comp_g =
372  requested->n_comp_group(sys_num, vg);
373  ghost_data[i*2*n_var_groups+vg] = n_comp_g;
374  dof_id_type my_first_dof = n_comp_g ?
375  requested->vg_dof_base(sys_num, vg) : 0;
376  libmesh_assert_not_equal_to (my_first_dof, DofObject::invalid_id);
377  ghost_data[i*2*n_var_groups+n_var_groups+vg] = my_first_dof;
378  }
379  }
380 
381  // Trade back the results
382  std::vector<dof_id_type> filled_request;
383  this->comm().send_receive(procdown, ghost_data,
384  procup, filled_request);
385 
386  // And copy the id changes we've now been informed of
387  libmesh_assert_equal_to (filled_request.size(),
388  requested_ids[procup].size() * 2 * n_var_groups);
389  for (std::size_t i=0; i != requested_ids[procup].size(); ++i)
390  {
391  DofObject *requested = (this->*objects)(mesh, requested_ids[procup][i]);
392  libmesh_assert(requested);
393  libmesh_assert_equal_to (requested->processor_id(), procup);
394  for (unsigned int vg=0; vg != n_var_groups; ++vg)
395  {
396  unsigned int n_comp_g =
397  libmesh_cast_int<unsigned int>(filled_request[i*2*n_var_groups+vg]);
398  requested->set_n_comp_group(sys_num, vg, n_comp_g);
399  if (n_comp_g)
400  {
401  dof_id_type my_first_dof =
402  filled_request[i*2*n_var_groups+n_var_groups+vg];
403  libmesh_assert_not_equal_to (my_first_dof, DofObject::invalid_id);
404  requested->set_vg_dof_base
405  (sys_num, vg, my_first_dof);
406  }
407  }
408  }
409  }
410 
411 #ifdef DEBUG
412  // Double check for invalid dofs
413  for (it = objects_begin; it != objects_end; ++it)
414  {
415  DofObject *obj = *it;
416  libmesh_assert (obj);
417  unsigned int num_variables = obj->n_vars(this->sys_number());
418  for (unsigned int v=0; v != num_variables; ++v)
419  {
420  unsigned int n_comp =
421  obj->n_comp(this->sys_number(), v);
422  dof_id_type my_first_dof = n_comp ?
423  obj->dof_number(this->sys_number(), v, 0) : 0;
424  libmesh_assert_not_equal_to (my_first_dof, DofObject::invalid_id);
425  }
426  }
427 #endif
428 }
429 
430 
431 
// NOTE(review): this listing is a doxygen text dump; the signature
// line (presumably "void DofMap::reinit(MeshBase& mesh)", doxygen line
// 432) was elided, along with several iterator declarations and
// stream statements flagged below -- verify against the original
// dof_map.C.  Recomputes per-DofObject variable-group layout and
// component counts for this system over the given prepared mesh; DOF
// indices themselves are invalidated at the end and reassigned later
// by distribute_dofs().
433 {
434  libmesh_assert (mesh.is_prepared());
435 
436  START_LOG("reinit()", "DofMap");
437 
438  const unsigned int
439  sys_num = this->sys_number(),
440  n_var_groups = this->n_variable_groups();
441 
442  // The DofObjects need to know how many variable groups we have, and
443  // how many variables there are in each group.
444  std::vector<unsigned int> n_vars_per_group; n_vars_per_group.reserve (n_var_groups);
445 
446  for (unsigned int vg=0; vg<n_var_groups; vg++)
447  n_vars_per_group.push_back (this->variable_group(vg).n_variables());
448 
449 #ifdef LIBMESH_ENABLE_AMR
450 
451  //------------------------------------------------------------
452  // Clear the old_dof_objects for all the nodes
453  // and elements so that we can overwrite them
454  {
455  MeshBase::node_iterator node_it = mesh.nodes_begin();
456  const MeshBase::node_iterator node_end = mesh.nodes_end();
457 
458  for ( ; node_it != node_end; ++node_it)
459  {
460  (*node_it)->clear_old_dof_object();
461  libmesh_assert (!(*node_it)->old_dof_object);
462  }
463 
// NOTE(review): doxygen line 464 (the declaration of elem_it,
// presumably "MeshBase::element_iterator elem_it = mesh.elements_begin();")
// was elided here.
465  const MeshBase::element_iterator elem_end = mesh.elements_end();
466 
467  for ( ; elem_it != elem_end; ++elem_it)
468  {
469  (*elem_it)->clear_old_dof_object();
470  libmesh_assert (!(*elem_it)->old_dof_object);
471  }
472  }
473 
474 
475  //------------------------------------------------------------
476  // Set the old_dof_objects for the elements that
477  // weren't just created, if these old dof objects
478  // had variables
479  {
// NOTE(review): doxygen line 480 (another elem_it declaration) was
// elided here.
481  const MeshBase::element_iterator elem_end = mesh.elements_end();
482 
483  for ( ; elem_it != elem_end; ++elem_it)
484  {
485  Elem* elem = *elem_it;
486 
487  // Skip the elements that were just refined
488  if (elem->refinement_flag() == Elem::JUST_REFINED) continue;
489 
490  for (unsigned int n=0; n<elem->n_nodes(); n++)
491  {
492  Node* node = elem->get_node(n);
493 
494  if (node->old_dof_object == NULL)
495  if (node->has_dofs(sys_num))
496  node->set_old_dof_object();
497  }
498 
// NOTE(review): doxygen line 499 (likely an assertion or comment)
// was elided here.
500 
501  if (elem->has_dofs(sys_num))
502  elem->set_old_dof_object();
503  }
504  }
505 
506 #endif // #ifdef LIBMESH_ENABLE_AMR
507 
508 
509  //------------------------------------------------------------
510  // Then set the number of variables for each \p DofObject
511  // equal to n_variables() for this system. This will
512  // handle new \p DofObjects that may have just been created
513  {
514  // All the nodes
515  MeshBase::node_iterator node_it = mesh.nodes_begin();
516  const MeshBase::node_iterator node_end = mesh.nodes_end();
517 
518  for ( ; node_it != node_end; ++node_it)
519  (*node_it)->set_n_vars_per_group(sys_num, n_vars_per_group);
520 
521  // All the elements
// NOTE(review): doxygen line 522 (another elem_it declaration) was
// elided here.
523  const MeshBase::element_iterator elem_end = mesh.elements_end();
524 
525  for ( ; elem_it != elem_end; ++elem_it)
526  (*elem_it)->set_n_vars_per_group(sys_num, n_vars_per_group);
527  }
528 
529 
530  // Zero _n_SCALAR_dofs, it will be updated below.
531  this->_n_SCALAR_dofs = 0;
532 
533  //------------------------------------------------------------
534  // Next allocate space for the DOF indices
535  for (unsigned int vg=0; vg<n_var_groups; vg++)
536  {
537  const VariableGroup &vg_description = this->variable_group(vg);
538 
539  const unsigned int n_var_in_group = vg_description.n_variables();
540  const FEType& base_fe_type = vg_description.type();
541 
542  // Don't need to loop over elements for a SCALAR variable
543  // Just increment _n_SCALAR_dofs
544  if(base_fe_type.family == SCALAR)
545  {
546  this->_n_SCALAR_dofs += base_fe_type.order*n_var_in_group;
547  continue;
548  }
549 
550  // This should be constant even on p-refined elements
551  const bool extra_hanging_dofs =
552  FEInterface::extra_hanging_dofs(base_fe_type);
553 
554  // For all the active elements
// NOTE(review): doxygen line 555 (the declaration of elem_it over
// mesh.active_elements_begin()) was elided here.
556  const MeshBase::element_iterator elem_end = mesh.active_elements_end();
557 
558  // Count vertex degrees of freedom first
559  for ( ; elem_it != elem_end; ++elem_it)
560  {
561  Elem* elem = *elem_it;
562  libmesh_assert(elem);
563 
564  // Skip the numbering if this variable is
565  // not active on this element's subdomain
566  if (!vg_description.active_on_subdomain(elem->subdomain_id()))
567  continue;
568 
569  const ElemType type = elem->type();
570  const unsigned int dim = elem->dim();
571 
572  FEType fe_type = base_fe_type;
573 
574 #ifdef LIBMESH_ENABLE_AMR
575  // Make sure we haven't done more p refinement than we can
576  // handle
577  if (elem->p_level() + base_fe_type.order >
578  FEInterface::max_order(base_fe_type, type))
579  {
580 # ifdef DEBUG
581  if (FEInterface::max_order(base_fe_type,type) <
582  static_cast<unsigned int>(base_fe_type.order))
583  {
// NOTE(review): doxygen line 584 (the start of this stream statement,
// presumably "libMesh::err") was elided here.
585  << "ERROR: Finite element "
586  << Utility::enum_to_string(base_fe_type.family)
587  << " on geometric element "
588  << Utility::enum_to_string(type) << std::endl
589  << "only supports FEInterface::max_order = "
590  << FEInterface::max_order(base_fe_type,type)
591  << ", not fe_type.order = " << base_fe_type.order
592  << std::endl;
593 
594  libmesh_error();
595  }
596 
// NOTE(review): doxygen line 597 (the start of this stream statement,
// presumably "libMesh::err" or "libMesh::out") was elided here.
598  << "WARNING: Finite element "
599  << Utility::enum_to_string(base_fe_type.family)
600  << " on geometric element "
601  << Utility::enum_to_string(type) << std::endl
602  << "could not be p refined past FEInterface::max_order = "
603  << FEInterface::max_order(base_fe_type,type)
604  << std::endl;
605 # endif
606  elem->set_p_level(FEInterface::max_order(base_fe_type,type)
607  - base_fe_type.order);
608  }
609 #endif
610 
611  fe_type.order = static_cast<Order>(fe_type.order +
612  elem->p_level());
613 
614  // Allocate the vertex DOFs
615  for (unsigned int n=0; n<elem->n_nodes(); n++)
616  {
617  Node* node = elem->get_node(n);
618 
619  if (elem->is_vertex(n))
620  {
621  const unsigned int old_node_dofs =
622  node->n_comp_group(sys_num, vg);
623 
624  const unsigned int vertex_dofs =
// NOTE(review): doxygen line 625 (the start of this expression,
// presumably "std::max(FEInterface::n_dofs_at_node(dim, fe_type,")
// was elided here.
626  type, n),
627  old_node_dofs);
628 
629  // Some discontinuous FEs have no vertex dofs
630  if (vertex_dofs > old_node_dofs)
631  {
632  node->set_n_comp_group(sys_num, vg,
633  vertex_dofs);
634 
635  // Abusing dof_number to set a "this is a
636  // vertex" flag
637  node->set_vg_dof_base(sys_num, vg,
638  vertex_dofs);
639 
640  // libMesh::out << "sys_num,vg,old_node_dofs,vertex_dofs="
641  // << sys_num << ","
642  // << vg << ","
643  // << old_node_dofs << ","
644  // << vertex_dofs << '\n',
645  // node->debug_buffer();
646 
647  // libmesh_assert_equal_to (vertex_dofs, node->n_comp(sys_num, vg));
648  // libmesh_assert_equal_to (vertex_dofs, node->vg_dof_base(sys_num, vg));
649  }
650  }
651  }
652  } // done counting vertex dofs
653 
654  // count edge & face dofs next
655  elem_it = mesh.active_elements_begin();
656 
657  for ( ; elem_it != elem_end; ++elem_it)
658  {
659  Elem* elem = *elem_it;
660  libmesh_assert(elem);
661 
662  // Skip the numbering if this variable is
663  // not active on this element's subdomain
664  if (!vg_description.active_on_subdomain(elem->subdomain_id()))
665  continue;
666 
667  const ElemType type = elem->type();
668  const unsigned int dim = elem->dim();
669 
670  FEType fe_type = base_fe_type;
671  fe_type.order = static_cast<Order>(fe_type.order +
672  elem->p_level());
673 
674  // Allocate the edge and face DOFs
675  for (unsigned int n=0; n<elem->n_nodes(); n++)
676  {
677  Node* node = elem->get_node(n);
678 
679  const unsigned int old_node_dofs =
680  node->n_comp_group(sys_num, vg);
681 
682  const unsigned int vertex_dofs = old_node_dofs?
683  libmesh_cast_int<unsigned int>(node->vg_dof_base (sys_num,vg)):0;
684 
685  const unsigned int new_node_dofs =
686  FEInterface::n_dofs_at_node(dim, fe_type, type, n);
687 
688  // We've already allocated vertex DOFs
689  if (elem->is_vertex(n))
690  {
691  libmesh_assert_greater_equal (old_node_dofs, vertex_dofs);
692  // //if (vertex_dofs < new_node_dofs)
693  // libMesh::out << "sys_num,vg,old_node_dofs,vertex_dofs,new_node_dofs="
694  // << sys_num << ","
695  // << vg << ","
696  // << old_node_dofs << ","
697  // << vertex_dofs << ","
698  // << new_node_dofs << '\n',
699  // node->debug_buffer();
700 
701  libmesh_assert_greater_equal (vertex_dofs, new_node_dofs);
702  }
703  // We need to allocate the rest
704  else
705  {
706  // If this has no dofs yet, it needs no vertex
707  // dofs, so we just give it edge or face dofs
708  if (!old_node_dofs)
709  {
710  node->set_n_comp_group(sys_num, vg,
711  new_node_dofs);
712  // Abusing dof_number to set a "this has no
713  // vertex dofs" flag
714  if (new_node_dofs)
715  node->set_vg_dof_base(sys_num, vg,
716  0);
717  }
718 
719  // If this has dofs, but has no vertex dofs,
720  // it may still need more edge or face dofs if
721  // we're p-refined.
722  else if (vertex_dofs == 0)
723  {
724  if (new_node_dofs > old_node_dofs)
725  {
726  node->set_n_comp_group(sys_num, vg,
727  new_node_dofs);
728 
729  node->set_vg_dof_base(sys_num, vg,
730  vertex_dofs);
731  }
732  }
733  // If this is another element's vertex,
734  // add more (non-overlapping) edge/face dofs if
735  // necessary
736  else if (extra_hanging_dofs)
737  {
738  if (new_node_dofs > old_node_dofs - vertex_dofs)
739  {
740  node->set_n_comp_group(sys_num, vg,
741  vertex_dofs + new_node_dofs);
742 
743  node->set_vg_dof_base(sys_num, vg,
744  vertex_dofs);
745  }
746  }
747  // If this is another element's vertex, add any
748  // (overlapping) edge/face dofs if necessary
749  else
750  {
751  libmesh_assert_greater_equal (old_node_dofs, vertex_dofs);
752  if (new_node_dofs > old_node_dofs)
753  {
754  node->set_n_comp_group(sys_num, vg,
755  new_node_dofs);
756 
757  node->set_vg_dof_base (sys_num, vg,
758  vertex_dofs);
759  }
760  }
761  }
762  }
763  // Allocate the element DOFs
764  const unsigned int dofs_per_elem =
765  FEInterface::n_dofs_per_elem(dim, fe_type,
766  type);
767 
768  elem->set_n_comp_group(sys_num, vg, dofs_per_elem);
769 
770  }
771  } // end loop over variable groups
772 
773  // Calling DofMap::reinit() by itself makes little sense,
774  // so we won't bother with nonlocal DofObjects.
775  // Those will be fixed by distribute_dofs
776 
777  //------------------------------------------------------------
778  // Finally, clear all the current DOF indices
779  // (distribute_dofs expects them cleared!)
780  this->invalidate_dofs(mesh);
781 
782  STOP_LOG("reinit()", "DofMap");
783 }
784 
785 
786 
// NOTE(review): the signature line (presumably
// "void DofMap::invalidate_dofs(MeshBase& mesh) const", doxygen line
// 787) was elided in this extraction -- verify.  Clears this system's
// DOF indices on every node and every active element of the mesh.
788 {
789  const unsigned int sys_num = this->sys_number();
790 
791  // All the nodes
792  MeshBase::node_iterator node_it = mesh.nodes_begin();
793  const MeshBase::node_iterator node_end = mesh.nodes_end();
794 
795  for ( ; node_it != node_end; ++node_it)
796  (*node_it)->invalidate_dofs(sys_num);
797 
798  // All the elements
// NOTE(review): doxygen line 799 (the declaration of elem_it over
// mesh.active_elements_begin()) was elided here.
800  const MeshBase::element_iterator elem_end = mesh.active_elements_end();
801 
802  for ( ; elem_it != elem_end; ++elem_it)
803  (*elem_it)->invalidate_dofs(sys_num);
804 }
805 
806 
807 
// NOTE(review): the signature line (presumably "void DofMap::clear()",
// doxygen line 808) was elided in this extraction, as were doxygen
// lines 821 and 826-827 below -- verify.  Resets the DofMap to its
// freshly-constructed state (the user-owned coupling matrix is
// deliberately left alone).
809 {
810  // we don't want to clear
811  // the coupling matrix!
812  // It should not change...
813  //_dof_coupling->clear();
814 
815  _variables.clear();
816  _variable_groups.clear();
817  _first_df.clear();
818  _end_df.clear();
819  _send_list.clear();
820  this->clear_sparsity();
// NOTE(review): doxygen line 821 (one more reset statement, possibly
// "need_full_sparsity_pattern = false;") was elided here.
822 
823 #ifdef LIBMESH_ENABLE_AMR
824 
825  _dof_constraints.clear();
// NOTE(review): doxygen lines 826-827 (likely clearing the primal and
// adjoint constraint-value maps) were elided here.
828  _n_old_dfs = 0;
829  _first_old_df.clear();
830  _end_old_df.clear();
831 
832 #endif
833 
834  _matrices.clear();
835 
836  _n_dfs = 0;
837 }
838 
839 
840 
// NOTE(review): the signature line (presumably
// "void DofMap::distribute_dofs (MeshBase& mesh)", doxygen line 841)
// was elided in this extraction, along with several statements flagged
// below -- verify against the original dof_map.C.  Assigns global DOF
// indices across all processors: a local temporary numbering pass, an
// allgather of per-processor counts to compute offsets, then a final
// numbering pass with the correct offsets.
842 {
843  // This function must be run on all processors at once
844  parallel_object_only();
845 
846  // Log how long it takes to distribute the degrees of freedom
847  START_LOG("distribute_dofs()", "DofMap");
848 
849  libmesh_assert (mesh.is_prepared());
850 
851  const processor_id_type proc_id = this->processor_id();
852  const processor_id_type n_proc = this->n_processors();
853 
854 // libmesh_assert_greater (this->n_variables(), 0);
855  libmesh_assert_less (proc_id, n_proc);
856 
857  // re-init in case the mesh has changed
858  this->reinit(mesh);
859 
860  // By default distribute variables in a
861  // var-major fashion, but allow run-time
862  // specification
863  bool node_major_dofs = libMesh::on_command_line ("--node_major_dofs");
864 
865  // The DOF counter, will be incremented as we encounter
866  // new degrees of freedom
867  dof_id_type next_free_dof = 0;
868 
869  // Clear the send list before we rebuild it
870  _send_list.clear();
871 
872  // Set temporary DOF indices on this processor
873  if (node_major_dofs)
874  this->distribute_local_dofs_node_major (next_free_dof, mesh);
875  else
876  this->distribute_local_dofs_var_major (next_free_dof, mesh);
877 
878  // Get DOF counts on all processors
879  std::vector<dof_id_type> dofs_on_proc(n_proc, 0);
880  this->comm().allgather(next_free_dof, dofs_on_proc);
881 
882  // Resize and fill the _first_df and _end_df arrays
883 #ifdef LIBMESH_ENABLE_AMR
// NOTE(review): doxygen lines 884-885 (likely saving _first_df/_end_df
// into _first_old_df/_end_old_df for AMR) were elided here.
886 #endif
887 
888  _first_df.resize(n_proc);
889  _end_df.resize (n_proc);
890 
891  // Get DOF offsets
892  _first_df[0] = 0;
893  for (processor_id_type i=1; i < n_proc; ++i)
894  _first_df[i] = _end_df[i-1] = _first_df[i-1] + dofs_on_proc[i-1];
895  _end_df[n_proc-1] = _first_df[n_proc-1] + dofs_on_proc[n_proc-1];
896 
897  // Clear all the current DOF indices
898  // (distribute_dofs expects them cleared!)
899  this->invalidate_dofs(mesh);
900 
901  next_free_dof = _first_df[proc_id];
902 
903  // Set permanent DOF indices on this processor
904  if (node_major_dofs)
905  this->distribute_local_dofs_node_major (next_free_dof, mesh);
906  else
907  this->distribute_local_dofs_var_major (next_free_dof, mesh);
908 
909  libmesh_assert_equal_to (next_free_dof, _end_df[proc_id]);
910 
911  //------------------------------------------------------------
912  // At this point, all n_comp and dof_number values on local
913  // DofObjects should be correct, but a ParallelMesh might have
914  // incorrect values on non-local DofObjects. Let's request the
915  // correct values from each other processor.
916 
917  if (this->n_processors() > 1)
918  {
// NOTE(review): doxygen lines 919, 921, 923 and 925 (the two
// set_nonlocal_dof_objects calls over node and element ranges, with
// the &DofMap::node_ptr / &DofMap::elem_ptr accessors) were elided
// here -- only the continuation lines survived.
920  mesh.nodes_end(),
922 
924  mesh.elements_end(),
926  }
927 
928 #ifdef DEBUG
929  {
930  const unsigned int
931  sys_num = this->sys_number();
932 
933  // Processors should all agree on DoF ids
// NOTE(review): doxygen line 934 (likely a MeshTools parallel-validity
// assertion call) was elided here.
935 
936  // DoF processor ids should match DofObject processor ids
// NOTE(review): doxygen line 937 (the declaration of node_it over
// mesh.nodes_begin()) was elided here.
938  const MeshBase::const_node_iterator node_end = mesh.nodes_end();
939  for ( ; node_it != node_end; ++node_it)
940  {
941  DofObject const * const dofobj = *node_it;
942  const processor_id_type proc_id = dofobj->processor_id();
943 
944  for (unsigned int v=0; v != dofobj->n_vars(sys_num); ++v)
945  for (unsigned int c=0; c != dofobj->n_comp(sys_num,v); ++c)
946  {
947  const dof_id_type dofid = dofobj->dof_number(sys_num,v,c);
948  libmesh_assert_greater_equal (dofid, this->first_dof(proc_id));
949  libmesh_assert_less (dofid, this->end_dof(proc_id));
950  }
951  }
952 
// NOTE(review): doxygen line 953 (the declaration of elem_it over
// mesh.elements_begin()) was elided here.
954  const MeshBase::const_element_iterator elem_end = mesh.elements_end();
955  for ( ; elem_it != elem_end; ++elem_it)
956  {
957  DofObject const * const dofobj = *elem_it;
958  const processor_id_type proc_id = dofobj->processor_id();
959 
960  for (unsigned int v=0; v != dofobj->n_vars(sys_num); ++v)
961  for (unsigned int c=0; c != dofobj->n_comp(sys_num,v); ++c)
962  {
963  const dof_id_type dofid = dofobj->dof_number(sys_num,v,c);
964  libmesh_assert_greater_equal (dofid, this->first_dof(proc_id));
965  libmesh_assert_less (dofid, this->end_dof(proc_id));
966  }
967  }
968  }
969 #endif
970 
971  // Set the total number of degrees of freedom
972 #ifdef LIBMESH_ENABLE_AMR
973  _n_old_dfs = _n_dfs;
974 #endif
975  _n_dfs = _end_df[n_proc-1];
976 
977  STOP_LOG("distribute_dofs()", "DofMap");
978 
979  // Note that in the add_neighbors_to_send_list nodes on processor
980  // boundaries that are shared by multiple elements are added for
981  // each element.
982  this->add_neighbors_to_send_list(mesh);
983 
984  // Here we used to clean up that data structure; now System and
985  // EquationSystems call that for us, after we've added constraint
986  // dependencies to the send_list too.
987  // this->sort_send_list ();
988 }
989 
990 
992  MeshBase& mesh)
993 {
994  const unsigned int sys_num = this->sys_number();
995  const unsigned int n_var_groups = this->n_variable_groups();
996 
997  //-------------------------------------------------------------------------
998  // First count and assign temporary numbers to local dofs
1000  const MeshBase::element_iterator elem_end = mesh.active_local_elements_end();
1001 
1002  for ( ; elem_it != elem_end; ++elem_it)
1003  {
1004  // Only number dofs connected to active
1005  // elements on this processor.
1006  Elem* elem = *elem_it;
1007  const unsigned int n_nodes = elem->n_nodes();
1008 
1009  // First number the nodal DOFS
1010  for (unsigned int n=0; n<n_nodes; n++)
1011  {
1012  Node* node = elem->get_node(n);
1013 
1014  for (unsigned vg=0; vg<n_var_groups; vg++)
1015  {
1016  const VariableGroup &vg_description(this->variable_group(vg));
1017 
1018  if( (vg_description.type().family != SCALAR) &&
1019  (vg_description.active_on_subdomain(elem->subdomain_id())) )
1020  {
1021  // assign dof numbers (all at once) if this is
1022  // our node and if they aren't already there
1023  if ((node->n_comp_group(sys_num,vg) > 0) &&
1024  (node->processor_id() == this->processor_id()) &&
1025  (node->vg_dof_base(sys_num,vg) ==
1027  {
1028  node->set_vg_dof_base(sys_num,
1029  vg,
1030  next_free_dof);
1031  next_free_dof += (vg_description.n_variables()*
1032  node->n_comp_group(sys_num,vg));
1033  //node->debug_buffer();
1034  }
1035  }
1036  }
1037  }
1038 
1039  // Now number the element DOFS
1040  for (unsigned vg=0; vg<n_var_groups; vg++)
1041  {
1042  const VariableGroup &vg_description(this->variable_group(vg));
1043 
1044  if ( (vg_description.type().family != SCALAR) &&
1045  (vg_description.active_on_subdomain(elem->subdomain_id())) )
1046  if (elem->n_comp_group(sys_num,vg) > 0)
1047  {
1048  libmesh_assert_equal_to (elem->vg_dof_base(sys_num,vg),
1050 
1051  elem->set_vg_dof_base(sys_num,
1052  vg,
1053  next_free_dof);
1054 
1055  next_free_dof += (vg_description.n_variables()*
1056  elem->n_comp(sys_num,vg));
1057  }
1058  }
1059  } // done looping over elements
1060 
1061 
1062  // we may have missed assigning DOFs to nodes that we own
1063  // but to which we have no connected elements matching our
1064  // variable restriction criterion. this will happen, for example,
1065  // if variable V is restricted to subdomain S. We may not own
1066  // any elements which live in S, but we may own nodes which are
1067  // *connected* to elements which do. in this scenario these nodes
1068  // will presently have unnumbered DOFs. we need to take care of
1069  // them here since we own them and no other processor will touch them.
1070  {
1071  MeshBase::node_iterator node_it = mesh.local_nodes_begin();
1072  const MeshBase::node_iterator node_end = mesh.local_nodes_end();
1073 
1074  for (; node_it != node_end; ++node_it)
1075  {
1076  Node *node = *node_it;
1077  libmesh_assert(node);
1078 
1079  for (unsigned vg=0; vg<n_var_groups; vg++)
1080  {
1081  const VariableGroup &vg_description(this->variable_group(vg));
1082 
1083  if (node->n_comp_group(sys_num,vg))
1084  if (node->vg_dof_base(sys_num,vg) == DofObject::invalid_id)
1085  {
1086  node->set_vg_dof_base (sys_num,
1087  vg,
1088  next_free_dof);
1089 
1090  next_free_dof += (vg_description.n_variables()*
1091  node->n_comp(sys_num,vg));
1092  }
1093  }
1094  }
1095  }
1096 
1097  // Finally, count up the SCALAR dofs
1098  this->_n_SCALAR_dofs = 0;
1099  for (unsigned vg=0; vg<n_var_groups; vg++)
1100  {
1101  const VariableGroup &vg_description(this->variable_group(vg));
1102 
1103  if( vg_description.type().family == SCALAR )
1104  {
1105  this->_n_SCALAR_dofs += (vg_description.n_variables()*
1106  vg_description.type().order);
1107  continue;
1108  }
1109  }
1110 
1111  // Only increment next_free_dof if we're on the processor
1112  // that holds this SCALAR variable
1113  if ( this->processor_id() == (this->n_processors()-1) )
1114  next_free_dof += _n_SCALAR_dofs;
1115 
1116 #ifdef DEBUG
1117  {
1118  // libMesh::out << "next_free_dof=" << next_free_dof << std::endl
1119  // << "_n_SCALAR_dofs=" << _n_SCALAR_dofs << std::endl;
1120 
1121  // Make sure we didn't miss any nodes
1123 
1124  MeshBase::node_iterator node_it = mesh.local_nodes_begin();
1125  const MeshBase::node_iterator node_end = mesh.local_nodes_end();
1126  for (; node_it != node_end; ++node_it)
1127  {
1128  Node *obj = *node_it;
1129  libmesh_assert(obj);
1130  unsigned int n_var_g = obj->n_var_groups(this->sys_number());
1131  for (unsigned int vg=0; vg != n_var_g; ++vg)
1132  {
1133  unsigned int n_comp_g =
1134  obj->n_comp_group(this->sys_number(), vg);
1135  dof_id_type my_first_dof = n_comp_g ?
1136  obj->vg_dof_base(this->sys_number(), vg) : 0;
1137  libmesh_assert_not_equal_to (my_first_dof, DofObject::invalid_id);
1138  }
1139  }
1140  }
1141 #endif // DEBUG
1142 }
1143 
1144 
1145 
// NOTE(review): doxygen source-listing extraction damage. The first line of
// this function's signature (doxygen line 1146) was a hyperlink and is
// missing; only "MeshBase& mesh)" survives. From the loop nesting (variable
// groups outermost, elements inner) this is presumably the variable-major
// local dof numbering routine, taking a running next_free_dof counter by
// reference -- TODO confirm against the upstream dof_map.C. Other stripped
// lines show as gaps in the embedded numbering: 1164 (the active-local
// element iterator declaration paired with elem_end), 1190 and 1205 (the
// right-hand sides of comparisons, presumably DofObject::invalid_id), and
// 1269 (inside the DEBUG block).
//
// For each non-SCALAR variable group active on an element's subdomain this
// assigns contiguous dof numbers to locally-owned nodal dofs, then element
// dofs; afterwards it sweeps locally-owned nodes missed by the element loop
// (subdomain-restricted variables), counts SCALAR dofs, and charges them to
// the last processor.
1147  MeshBase& mesh)
1148 {
1149  const unsigned int sys_num = this->sys_number();
1150  const unsigned int n_var_groups = this->n_variable_groups();
1151 
1152  //-------------------------------------------------------------------------
1153  // First count and assign temporary numbers to local dofs
1154  for (unsigned vg=0; vg<n_var_groups; vg++)
1155  {
1156  const VariableGroup &vg_description(this->variable_group(vg));
1157 
1158  const unsigned int n_vars_in_group = vg_description.n_variables();
1159 
1160  // Skip the SCALAR dofs
1161  if (vg_description.type().family == SCALAR)
1162  continue;
1163 
1165  const MeshBase::element_iterator elem_end = mesh.active_local_elements_end();
1166 
1167  for ( ; elem_it != elem_end; ++elem_it)
1168  {
1169  // Only number dofs connected to active
1170  // elements on this processor.
1171  Elem* elem = *elem_it;
1172 
1173  // ... and only variables which are active on
1174  // on this element's subdomain
1175  if (!vg_description.active_on_subdomain(elem->subdomain_id()))
1176  continue;
1177 
1178  const unsigned int n_nodes = elem->n_nodes();
1179 
1180  // First number the nodal DOFS
1181  for (unsigned int n=0; n<n_nodes; n++)
1182  {
1183  Node* node = elem->get_node(n);
1184 
1185  // assign dof numbers (all at once) if this is
1186  // our node and if they aren't already there
1187  if ((node->n_comp_group(sys_num,vg) > 0) &&
1188  (node->processor_id() == this->processor_id()) &&
1189  (node->vg_dof_base(sys_num,vg) ==
1191  {
1192  node->set_vg_dof_base(sys_num,
1193  vg,
1194  next_free_dof);
1195 
1196  next_free_dof += (n_vars_in_group*
1197  node->n_comp_group(sys_num,vg));
1198  }
1199  }
1200 
1201  // Now number the element DOFS
1202  if (elem->n_comp_group(sys_num,vg) > 0)
1203  {
1204  libmesh_assert_equal_to (elem->vg_dof_base(sys_num,vg),
1206 
1207  elem->set_vg_dof_base(sys_num,
1208  vg,
1209  next_free_dof);
1210 
1211  next_free_dof += (n_vars_in_group*
1212  elem->n_comp_group(sys_num,vg));
1213  }
1214  } // end loop on elements
1215 
1216  // we may have missed assigning DOFs to nodes that we own
1217  // but to which we have no connected elements matching our
1218  // variable restriction criterion. this will happen, for example,
1219  // if variable V is restricted to subdomain S. We may not own
1220  // any elements which live in S, but we may own nodes which are
1221  // *connected* to elements which do. in this scenario these nodes
1222  // will presently have unnumbered DOFs. we need to take care of
1223  // them here since we own them and no other processor will touch them.
1224  {
1225  MeshBase::node_iterator node_it = mesh.local_nodes_begin();
1226  const MeshBase::node_iterator node_end = mesh.local_nodes_end();
1227 
1228  for (; node_it != node_end; ++node_it)
1229  {
1230  Node *node = *node_it;
1231  libmesh_assert(node);
1232 
1233  if (node->n_comp_group(sys_num,vg))
1234  if (node->vg_dof_base(sys_num,vg) == DofObject::invalid_id)
1235  {
1236  node->set_vg_dof_base (sys_num,
1237  vg,
1238  next_free_dof);
1239 
1240  next_free_dof += (n_vars_in_group*
1241  node->n_comp_group(sys_num,vg));
1242  }
1243  }
1244  }
1245  } // end loop on variable groups
1246 
1247  // Finally, count up the SCALAR dofs
1248  this->_n_SCALAR_dofs = 0;
1249  for (unsigned vg=0; vg<n_var_groups; vg++)
1250  {
1251  const VariableGroup &vg_description(this->variable_group(vg));
1252 
1253  if( vg_description.type().family == SCALAR )
1254  {
1255  this->_n_SCALAR_dofs += (vg_description.n_variables()*
1256  vg_description.type().order);
1257  continue;
1258  }
1259  }
1260 
1261  // Only increment next_free_dof if we're on the processor
1262  // that holds this SCALAR variable
1263  if ( this->processor_id() == (this->n_processors()-1) )
1264  next_free_dof += _n_SCALAR_dofs;
1265 
1266 #ifdef DEBUG
1267  {
1268  // Make sure we didn't miss any nodes
1270 
1271  MeshBase::node_iterator node_it = mesh.local_nodes_begin();
1272  const MeshBase::node_iterator node_end = mesh.local_nodes_end();
1273  for (; node_it != node_end; ++node_it)
1274  {
1275  Node *obj = *node_it;
1276  libmesh_assert(obj);
1277  unsigned int n_var_g = obj->n_var_groups(this->sys_number());
1278  for (unsigned int vg=0; vg != n_var_g; ++vg)
1279  {
1280  unsigned int n_comp_g =
1281  obj->n_comp_group(this->sys_number(), vg);
1282  dof_id_type my_first_dof = n_comp_g ?
1283  obj->vg_dof_base(this->sys_number(), vg) : 0;
1284  libmesh_assert_not_equal_to (my_first_dof, DofObject::invalid_id);
1285  }
1286  }
1287  }
1288 #endif // DEBUG
1289 }
1290 
1291 
1292 
// NOTE(review): the signature line (doxygen 1293) was stripped by the HTML
// extraction; per the START_LOG tag below this is
// DofMap::add_neighbors_to_send_list(MeshBase&) -- TODO confirm. Line 1386
// (declaration of the elem_it iterator paired with elem_end) was also
// stripped.
//
// Populates _send_list with remote (ghost) dof indices this processor will
// need: (1) remote dofs living on nodes of active local elements, (2) dofs
// of active elements that are face neighbors of local elements but owned
// elsewhere, and (3) dofs of nonlocal active elements that merely share a
// node with a local element. Duplicates are allowed here; prepare_send_list()
// sorts and uniquifies later.
1294 {
1295  START_LOG("add_neighbors_to_send_list()", "DofMap");
1296 
1297  const unsigned int sys_num = this->sys_number();
1298 
1299  //-------------------------------------------------------------------------
1300  // We need to add the DOFs from elements that live on neighboring processors
1301  // that are neighbors of the elements on the local processor
1302  //-------------------------------------------------------------------------
1303 
1304  MeshBase::const_element_iterator local_elem_it
1305  = mesh.active_local_elements_begin();
1306  const MeshBase::const_element_iterator local_elem_end
1307  = mesh.active_local_elements_end();
1308 
1309  std::vector<bool> node_on_processor(mesh.max_node_id(), false);
1310  std::vector<dof_id_type> di;
1311  std::vector<const Elem *> family;
1312 
1313  // Loop over the active local elements, adding all active elements
1314  // that neighbor an active local element to the send list.
1315  for ( ; local_elem_it != local_elem_end; ++local_elem_it)
1316  {
1317  const Elem* elem = *local_elem_it;
1318 
1319  for (unsigned int n=0; n!=elem->n_nodes(); n++)
1320  {
1321  // Flag all the nodes of active local elements as seen, so
1322  // we can add nodal neighbor dofs to the send_list later.
1323  node_on_processor[elem->node(n)] = true;
1324 
1325  // Add all remote dofs on these nodes to the send_list.
1326  // This is necessary in case those dofs are *not* also dofs
1327  // on neighbors; e.g. in the case of a HIERARCHIC's local
1328  // side which is only a vertex on the neighbor that owns it.
1329  const Node* node = elem->get_node(n);
1330  const unsigned n_vars = node->n_vars(sys_num);
1331  for (unsigned int v=0; v != n_vars; ++v)
1332  {
1333  const unsigned int n_comp = node->n_comp(sys_num, v);
1334  for (unsigned int c=0; c != n_comp; ++c)
1335  {
1336  const dof_id_type dof_index = node->dof_number(sys_num, v, c);
1337  if (dof_index < this->first_dof() || dof_index >= this->end_dof())
1338  {
1339  _send_list.push_back(dof_index);
1340  // libmesh_here();
1341  // libMesh::out << "sys_num,v,c,dof_index="
1342  // << sys_num << ", "
1343  // << v << ", "
1344  // << c << ", "
1345  // << dof_index << '\n';
1346  // node->debug_buffer();
1347  }
1348  }
1349  }
1350  }
1351 
1352  // Loop over the neighbors of those elements
1353  for (unsigned int s=0; s<elem->n_neighbors(); s++)
1354  if (elem->neighbor(s) != NULL)
1355  {
1356  family.clear();
1357 
1358  // Find all the active elements that neighbor elem
1359 #ifdef LIBMESH_ENABLE_AMR
1360  if (!elem->neighbor(s)->active())
1361  elem->neighbor(s)->active_family_tree_by_neighbor(family, elem);
1362  else
1363 #endif
1364  family.push_back(elem->neighbor(s));
1365 
1366  for (dof_id_type i=0; i!=family.size(); ++i)
1367  // If the neighbor lives on a different processor
1368  if (family[i]->processor_id() != this->processor_id())
1369  {
1370  // Get the DOF indices for this neighboring element
1371  this->dof_indices (family[i], di);
1372 
1373  // Insert the remote DOF indices into the send list
1374  for (std::size_t j=0; j != di.size(); ++j)
1375  if (di[j] < this->first_dof() ||
1376  di[j] >= this->end_dof())
1377  _send_list.push_back(di[j]);
1378  }
1379  }
1380  }
1381 
1382  // Now loop over all non_local active elements and add any missing
1383  // nodal-only neighbors. This will also get any dofs from nonlocal
1384  // nodes on local elements, because every nonlocal node exists on a
1385  // nonlocal nodal neighbor element.
1387  = mesh.active_elements_begin();
1388  const MeshBase::const_element_iterator elem_end
1389  = mesh.active_elements_end();
1390 
1391  for ( ; elem_it != elem_end; ++elem_it)
1392  {
1393  const Elem* elem = *elem_it;
1394 
1395  // If this is one of our elements, we've already added it
1396  if (elem->processor_id() == this->processor_id())
1397  continue;
1398 
1399  // Do we need to add the element DOFs?
1400  bool add_elem_dofs = false;
1401 
1402  // Check all the nodes of the element to see if it
1403  // shares a node with us
1404  for (unsigned int n=0; n!=elem->n_nodes(); n++)
1405  if (node_on_processor[elem->node(n)])
1406  add_elem_dofs = true;
1407 
1408  // Add the element degrees of freedom if it shares at
1409  // least one node.
1410  if (add_elem_dofs)
1411  {
1412  // Get the DOF indices for this neighboring element
1413  this->dof_indices (elem, di);
1414 
1415  // Insert the remote DOF indices into the send list
1416  for (std::size_t j=0; j != di.size(); ++j)
1417  if (di[j] < this->first_dof() ||
1418  di[j] >= this->end_dof())
1419  _send_list.push_back(di[j]);
1420  }
1421  }
1422 
1423  STOP_LOG("add_neighbors_to_send_list()", "DofMap");
1424 }
1425 
1426 
1427 
// NOTE(review): the signature line (doxygen 1428) was stripped; per the
// START_LOG tag this is DofMap::prepare_send_list(). Stripped interior lines
// 1433, 1443 and 1447 are presumably the guard on an extra-send-list
// function pointer, its invocation, and the _augment_send_list callback
// invocation respectively -- TODO confirm against upstream.
//
// Finalizes _send_list: invokes any user hooks that add extra ghost dofs,
// then sorts, removes duplicates, and shrinks the vector to fit.
1429 {
1430  START_LOG("prepare_send_list()", "DofMap");
1431 
1432  // Check to see if we have any extra stuff to add to the send_list
1434  {
1435  if (_augment_send_list)
1436  {
1437  libmesh_here();
1438  libMesh::out << "WARNING: You have specified both an extra send list function and object.\n"
1439  << " Are you sure this is what you meant to do??"
1440  << std::endl;
1441  }
1442 
1444  }
1445 
1446  if (_augment_send_list)
1448 
1449  // First sort the send list. After this
1450  // duplicated elements will be adjacent in the
1451  // vector
1452  std::sort(_send_list.begin(), _send_list.end());
1453 
1454  // Now use std::unique to remove duplicate entries
1455  std::vector<dof_id_type>::iterator new_end =
1456  std::unique (_send_list.begin(), _send_list.end());
1457 
1458  // Remove the end of the send_list. Use the "swap trick"
1459  // from Effective STL
1460  std::vector<dof_id_type> (_send_list.begin(), new_end).swap (_send_list);
1461 
1462  STOP_LOG("prepare_send_list()", "DofMap");
1463 }
1464 
1465 
1466 
// NOTE(review): the signature line (doxygen 1467) was stripped. From the
// body this is a const bool query taking a MeshBase (it calls
// mesh.mesh_dimension()), presumably use_coupled_neighbor_dofs() -- TODO
// confirm. Returns true when "--implicit_neighbor_dofs" was given on the
// command line, or when every variable in the system is DISCONTINUOUS
// (i.e. the user is doing DG/FVM, which couples neighboring elements).
1468 {
1469  // If we were asked on the command line, then we need to
1470  // include sensitivities between neighbor degrees of freedom
1471  bool implicit_neighbor_dofs =
1472  libMesh::on_command_line ("--implicit_neighbor_dofs");
1473 
1474  // look at all the variables in this system. If every one is
1475  // discontinuous then the user must be doing DG/FVM, so be nice
1476  // and force implicit_neighbor_dofs=true
1477  {
1478  bool all_discontinuous_dofs = true;
1479 
1480  for (unsigned int var=0; var<this->n_variables(); var++)
1481  if (FEAbstract::build (mesh.mesh_dimension(),
1482  this->variable_type(var))->get_continuity() != DISCONTINUOUS)
1483  all_discontinuous_dofs = false;
1484 
1485  if (all_discontinuous_dofs)
1486  implicit_neighbor_dofs = true;
1487  }
1488 
1489  return implicit_neighbor_dofs;
1490 }
1491 
1492 
1493 
// NOTE(review): signature line (doxygen 1494) stripped -- presumably
// DofMap::compute_sparsity(const MeshBase&) given the build_sparsity call.
// Line 1506, the branch condition, was also stripped; judging from the
// matching comments it tests a need-full-sparsity-pattern flag -- TODO
// confirm.
//
// Builds the sparsity pattern via build_sparsity(), then either (a) keeps
// the full pattern alive, pointing _n_nz/_n_oz into it and handing it to
// every attached SparseMatrix, or (b) steals just the per-row nonzero
// counts (on-processor n_nz, off-processor n_oz) and frees the rest.
1495 {
1496  _sp = this->build_sparsity(mesh);
1497 
1498  // It is possible that some \p SparseMatrix implementations want to
1499  // see it. Let them see it before we throw it away.
1500  std::vector<SparseMatrix<Number>* >::const_iterator
1501  pos = _matrices.begin(),
1502  end = _matrices.end();
1503 
1504  // If we need the full sparsity pattern, then we share a view of its
1505  // arrays, and we pass it in to the matrices.
1507  {
1508  _n_nz = &_sp->n_nz;
1509  _n_oz = &_sp->n_oz;
1510 
1511  for (; pos != end; ++pos)
1512  (*pos)->update_sparsity_pattern (_sp->sparsity_pattern);
1513  }
1514  // If we don't need the full sparsity pattern anymore, steal the
1515  // arrays we do need and free the rest of the memory
1516  else
1517  {
1518  if (!_n_nz)
1519  _n_nz = new std::vector<dof_id_type>();
1520  _n_nz->swap(_sp->n_nz);
1521  if (!_n_oz)
1522  _n_oz = new std::vector<dof_id_type>();
1523  _n_oz->swap(_sp->n_oz);
1524 
1525  _sp.reset();
1526  }
1527 }
1528 
1529 
1530 
// NOTE(review): signature line (doxygen 1531) stripped -- presumably
// DofMap::clear_sparsity(). Line 1533, the branch condition, was stripped
// too; it mirrors the condition in the compute routine above (a
// need-full-sparsity-pattern flag) -- TODO confirm.
//
// Releases sparsity storage. In the full-pattern case _n_nz/_n_oz merely
// alias arrays owned by _sp (asserted below), so resetting _sp frees them;
// otherwise _n_nz/_n_oz were heap-allocated here and must be deleted.
// Either way both pointers end up NULL.
1532 {
1534  {
1535  libmesh_assert(_sp.get());
1536  libmesh_assert(!_n_nz || _n_nz == &_sp->n_nz);
1537  libmesh_assert(!_n_oz || _n_oz == &_sp->n_oz);
1538  _sp.reset();
1539  }
1540  else
1541  {
1542  libmesh_assert(!_sp.get());
1543  delete _n_nz;
1544  delete _n_oz;
1545  }
1546  _n_nz = NULL;
1547  _n_oz = NULL;
1548 }
1549 
1550 
1551 
// NOTE(review): the first signature line (doxygen 1552) was stripped; from
// the body it declares the global input vector Ug (a NumericVector<Number>,
// judging by first_local_index()/size() calls) and this is presumably
// DofMap::extract_local_vector -- TODO confirm. Stripped lines 1582-1583
// presumably declare the DenseMatrix C and DenseVector H passed to
// build_constraint_matrix_and_vector below.
//
// Copies the entries of Ug addressed by dof_indices_in into the
// element-local vector Ue. With AMR enabled, if any index is constrained
// the trivial copy is redone as Ue = C*Ug + H using the element's
// constraint matrix/vector, so hanging-node (and inhomogeneous) constraints
// are applied even when Ug itself is unconstrained.
1553  const std::vector<dof_id_type>& dof_indices_in,
1554  DenseVectorBase<Number>& Ue) const
1555 {
1556 #ifdef LIBMESH_ENABLE_AMR
1557 
1558  // Trivial mapping
1559  libmesh_assert_equal_to (dof_indices_in.size(), Ue.size());
1560  bool has_constrained_dofs = false;
1561 
1562  for (unsigned int il=0;
1563  il != libmesh_cast_int<unsigned int>(dof_indices_in.size()); il++)
1564  {
1565  const dof_id_type ig = dof_indices_in[il];
1566 
1567  if (this->is_constrained_dof (ig)) has_constrained_dofs = true;
1568 
1569  libmesh_assert_less (ig, Ug.size());
1570 
1571  Ue.el(il) = Ug(ig);
1572  }
1573 
1574  // If the element has any constrained DOFs then we need
1575  // to account for them in the mapping. This will handle
1576  // the case that the input vector is not constrained.
1577  if (has_constrained_dofs)
1578  {
1579  // Copy the input DOF indices.
1580  std::vector<dof_id_type> constrained_dof_indices(dof_indices_in);
1581 
1584 
1585  this->build_constraint_matrix_and_vector (C, H, constrained_dof_indices);
1586 
1587  libmesh_assert_equal_to (dof_indices_in.size(), C.m());
1588  libmesh_assert_equal_to (constrained_dof_indices.size(), C.n());
1589 
1590  // zero-out Ue
1591  Ue.zero();
1592 
1593  // compute Ue = C Ug, with proper mapping.
1594  const unsigned int n_original_dofs =
1595  libmesh_cast_int<unsigned int>(dof_indices_in.size());
1596  for (unsigned int i=0; i != n_original_dofs; i++)
1597  {
1598  Ue.el(i) = H(i);
1599 
1600  const unsigned int n_constrained =
1601  libmesh_cast_int<unsigned int>(constrained_dof_indices.size());
1602  for (unsigned int j=0; j<n_constrained; j++)
1603  {
1604  const dof_id_type jg = constrained_dof_indices[j];
1605 
1606 // If Ug is a serial or ghosted vector, then this assert is
1607 // overzealous. If Ug is a parallel vector, then this assert
1608 // is redundant.
1609 // libmesh_assert ((jg >= Ug.first_local_index()) &&
1610 // (jg < Ug.last_local_index()));
1611 
1612  Ue.el(i) += C(i,j)*Ug(jg);
1613  }
1614  }
1615  }
1616 
1617 #else
1618 
1619  // Trivial mapping
1620 
1621  const unsigned int n_original_dofs =
1622  libmesh_cast_int<unsigned int>(dof_indices_in.size());
1623 
1624  libmesh_assert_equal_to (n_original_dofs, Ue.size());
1625 
1626  for (unsigned int il=0; il<n_original_dofs; il++)
1627  {
1628  const dof_id_type ig = dof_indices_in[il];
1629 
1630  libmesh_assert ((ig >= Ug.first_local_index()) && (ig < Ug.last_local_index()));
1631 
1632  Ue.el(il) = Ug(ig);
1633  }
1634 
1635 #endif
1636 }
1637 
// Fills di with the global dof indices of elem for variable vn, or for all
// variables when vn == libMesh::invalid_uint. Nodal dofs come first in node
// order, then element-interior dofs, then any requested SCALAR dofs are
// appended last (they live at the end of the global numbering). Non-vertex
// dofs on "extra hanging dofs" elements are taken in reverse from the end of
// the node's component list, to simplify p refinement.
//
// NOTE(review): doxygen extraction stripped several linked lines, visible as
// gaps in the embedded numbering: 1692 (the initializer of
// extra_hanging_dofs, presumably FEInterface::extra_hanging_dofs(fe_type))
// and 1734/1745/1765 (the second argument of the surrounding asserts,
// presumably DofObject::invalid_id) -- TODO confirm against upstream.
1638 void DofMap::dof_indices (const Elem* const elem,
1639  std::vector<dof_id_type>& di,
1640  const unsigned int vn) const
1641 {
1642  START_LOG("dof_indices()", "DofMap");
1643 
1644  libmesh_assert(elem);
1645 
1646  const unsigned int n_nodes = elem->n_nodes();
1647  const ElemType type = elem->type();
1648  const unsigned int sys_num = this->sys_number();
1649  const unsigned int n_vars = this->n_variables();
1650  const unsigned int dim = elem->dim();
1651 
1652  // Clear the DOF indices vector
1653  di.clear();
1654 
1655 #ifdef DEBUG
1656  // Check that sizes match in DEBUG mode
1657  unsigned int tot_size = 0;
1658 #endif
1659 
1660  // Create a vector to indicate which
1661  // SCALAR variables have been requested
1662  std::vector<unsigned int> SCALAR_var_numbers;
1663  SCALAR_var_numbers.clear();
1664 
1665  // Get the dof numbers
1666  for (unsigned int v=0; v<n_vars; v++)
1667  if ((v == vn) || (vn == libMesh::invalid_uint))
1668  {
1669  if(this->variable(v).type().family == SCALAR)
1670  {
1671  // We asked for this variable, so add it to the vector.
1672  SCALAR_var_numbers.push_back(v);
1673 
1674 #ifdef DEBUG
1675  FEType fe_type = this->variable_type(v);
1676  tot_size += FEInterface::n_dofs(dim,
1677  fe_type,
1678  type);
1679 #endif
1680  }
1681  else
1682  if (this->variable(v).active_on_subdomain(elem->subdomain_id()))
1683  { // Do this for all the variables if one was not specified
1684  // or just for the specified variable
1685 
1686  // Increase the polynomial order on p refined elements
1687  FEType fe_type = this->variable_type(v);
1688  fe_type.order = static_cast<Order>(fe_type.order +
1689  elem->p_level());
1690 
1691  const bool extra_hanging_dofs =
1693 
1694 #ifdef DEBUG
1695  tot_size += FEInterface::n_dofs(dim,
1696  fe_type,
1697  type);
1698 #endif
1699 
1700  // Get the node-based DOF numbers
1701  for (unsigned int n=0; n<n_nodes; n++)
1702  {
1703  const Node* node = elem->get_node(n);
1704 
1705  // There is a potential problem with h refinement. Imagine a
1706  // quad9 that has a linear FE on it. Then, on the hanging side,
1707  // it can falsely identify a DOF at the mid-edge node. This is why
1708  // we call FEInterface instead of node->n_comp() directly.
1709  const unsigned int nc = FEInterface::n_dofs_at_node (dim,
1710  fe_type,
1711  type,
1712  n);
1713 
1714  // If this is a non-vertex on a hanging node with extra
1715  // degrees of freedom, we use the non-vertex dofs (which
1716  // come in reverse order starting from the end, to
1717  // simplify p refinement)
1718  if (extra_hanging_dofs && !elem->is_vertex(n))
1719  {
1720  const int dof_offset = node->n_comp(sys_num,v) - nc;
1721 
1722  // We should never have fewer dofs than necessary on a
1723  // node unless we're getting indices on a parent element,
1724  // and we should never need the indices on such a node
1725  if (dof_offset < 0)
1726  {
1727  libmesh_assert(!elem->active());
1728  di.resize(di.size() + nc, DofObject::invalid_id);
1729  }
1730  else
1731  for (int i=node->n_comp(sys_num,v)-1; i>=dof_offset; i--)
1732  {
1733  libmesh_assert_not_equal_to (node->dof_number(sys_num,v,i),
1735  di.push_back(node->dof_number(sys_num,v,i));
1736  }
1737  }
1738  // If this is a vertex or an element without extra hanging
1739  // dofs, our dofs come in forward order coming from the
1740  // beginning
1741  else
1742  for (unsigned int i=0; i<nc; i++)
1743  {
1744  libmesh_assert_not_equal_to (node->dof_number(sys_num,v,i),
1746  di.push_back(node->dof_number(sys_num,v,i));
1747  }
1748  }
1749 
1750  // If there are any element-based DOF numbers, get them
1751  const unsigned int nc = FEInterface::n_dofs_per_elem(dim,
1752  fe_type,
1753  type);
1754  // We should never have fewer dofs than necessary on an
1755  // element unless we're getting indices on a parent element,
1756  // and we should never need those indices
1757  if (nc != 0)
1758  {
1759  if (elem->n_systems() > sys_num &&
1760  nc <= elem->n_comp(sys_num,v))
1761  {
1762  for (unsigned int i=0; i<nc; i++)
1763  {
1764  libmesh_assert_not_equal_to (elem->dof_number(sys_num,v,i),
1766 
1767  di.push_back(elem->dof_number(sys_num,v,i));
1768  }
1769  }
1770  else
1771  {
1772  libmesh_assert(!elem->active() || fe_type.family == LAGRANGE);
1773  di.resize(di.size() + nc, DofObject::invalid_id);
1774  }
1775  }
1776  }
1777  } // end loop over variables
1778 
1779  // Finally append any SCALAR dofs that we asked for.
1780  std::vector<dof_id_type> di_new;
1781  std::vector<unsigned int>::iterator it = SCALAR_var_numbers.begin();
1782  std::vector<unsigned int>::const_iterator it_end = SCALAR_var_numbers.end();
1783  for( ; it != it_end; ++it)
1784  {
1785  this->SCALAR_dof_indices(di_new,*it);
1786  di.insert( di.end(), di_new.begin(), di_new.end());
1787  }
1788 
1789 #ifdef DEBUG
1790  libmesh_assert_equal_to (tot_size, di.size());
1791 #endif
1792 
1793  STOP_LOG("dof_indices()", "DofMap");
1794 }
1795 
1796 
1797 
1798 void DofMap::SCALAR_dof_indices (std::vector<dof_id_type>& di,
1799  const unsigned int vn,
1800 #ifdef LIBMESH_ENABLE_AMR
1801  const bool old_dofs
1802 #else
1803  const bool
1804 #endif
1805  ) const
1806 {
1807  START_LOG("SCALAR_dof_indices()", "DofMap");
1808 
1809  if(this->variable(vn).type().family != SCALAR)
1810  {
1811  libMesh::err << "ERROR: SCALAR_dof_indices called for a non-SCALAR variable."
1812  << std::endl;
1813  }
1814 
1815  // Clear the DOF indices vector
1816  di.clear();
1817 
1818  // First we need to find out the first dof
1819  // index for each SCALAR.
1820 #ifdef LIBMESH_ENABLE_AMR
1821  dof_id_type first_SCALAR_dof_index = (old_dofs ? n_old_dofs() : n_dofs()) - n_SCALAR_dofs();
1822 #else
1823  dof_id_type first_SCALAR_dof_index = n_dofs() - n_SCALAR_dofs();
1824 #endif
1825  std::map<unsigned int, dof_id_type> SCALAR_first_dof_index;
1826  SCALAR_first_dof_index.clear();
1827 
1828  // Iterate over _all_ of the SCALARs and store each one's first dof index
1829  // We need to do this since the SCALAR dofs are packed contiguously
1830  for (unsigned int v=0; v<this->n_variables(); v++)
1831  if(this->variable(v).type().family == SCALAR)
1832  {
1833  unsigned int current_n_SCALAR_dofs = this->variable(v).type().order;
1834  SCALAR_first_dof_index.insert(
1835  std::pair<unsigned int, dof_id_type>(v,first_SCALAR_dof_index) );
1836  first_SCALAR_dof_index += current_n_SCALAR_dofs;
1837  }
1838 
1839  // Now use vn to index into SCALAR_first_dof_index
1840  std::map<unsigned int, dof_id_type>::const_iterator iter =
1841  SCALAR_first_dof_index.find(vn);
1842 
1843 #ifdef DEBUG
1844  libmesh_assert (iter != SCALAR_first_dof_index.end());
1845 #endif
1846 
1847  dof_id_type current_first_SCALAR_dof_index = iter->second;
1848 
1849  // Also, get the number of SCALAR dofs from the variable order
1850  unsigned int current_n_SCALAR_dofs = this->variable(vn).type().order;
1851 
1852  for(unsigned int j=0; j<current_n_SCALAR_dofs; j++)
1853  {
1854  dof_id_type index = current_first_SCALAR_dof_index+j;
1855  di.push_back(index);
1856  }
1857 
1858  STOP_LOG("SCALAR_dof_indices()", "DofMap");
1859 }
1860 
1861 
1862 
1863 bool DofMap::all_semilocal_indices (const std::vector<dof_id_type>& dof_indices_in) const
1864 {
1865  // We're all semilocal unless we find a counterexample
1866  for (std::size_t i=0; i != dof_indices_in.size(); ++i)
1867  {
1868  const dof_id_type di = dof_indices_in[i];
1869  // If it's not in the local indices
1870  if (di < this->first_dof() ||
1871  di >= this->end_dof())
1872  {
1873  // and if it's not in the ghost indices, then we're not
1874  // semilocal
1875  if (!std::binary_search(_send_list.begin(), _send_list.end(), di))
1876  return false;
1877  }
1878  }
1879 
1880  return true;
1881 }
1882 
1883 
1884 #ifdef LIBMESH_ENABLE_AMR
1885 
// Same as dof_indices(), but returns the OLD dof indices recorded in each
// object's old_dof_object, i.e. the numbering from before the last
// refinement/coarsening step.  The polynomial order is adjusted by -1 on
// just-p-refined elements and +1 on just-p-coarsened elements so the old
// FE dof counts are used.  SCALAR dofs are appended using the old
// numbering (note the 'true' third argument to SCALAR_dof_indices).
//
// NOTE(review): doxygen extraction stripped several linked lines (gaps at
// 1893, 1955, 1976, 1993, 2001, 2012, 2030, 2035, 2043 in the embedded
// numbering).  Judging from the matching current-dof routine above they
// are mostly old_dof_object asserts, the extra_hanging_dofs initializer,
// and DofObject::invalid_id assert arguments -- TODO confirm upstream.
1886 void DofMap::old_dof_indices (const Elem* const elem,
1887  std::vector<dof_id_type>& di,
1888  const unsigned int vn) const
1889 {
1890  START_LOG("old_dof_indices()", "DofMap");
1891 
1892  libmesh_assert(elem);
1894 
1895 
1896  const unsigned int n_nodes = elem->n_nodes();
1897  const ElemType type = elem->type();
1898  const unsigned int sys_num = this->sys_number();
1899  const unsigned int n_vars = this->n_variables();
1900  const unsigned int dim = elem->dim();
1901 
1902  // Clear the DOF indices vector.
1903  di.clear();
1904 
1905 #ifdef DEBUG
1906  // Check that sizes match
1907  unsigned int tot_size = 0;
1908 #endif
1909 
1910  // Create a vector to indicate which
1911  // SCALAR variables have been requested
1912  std::vector<unsigned int> SCALAR_var_numbers;
1913  SCALAR_var_numbers.clear();
1914 
1915  // Get the dof numbers
1916  for (unsigned int v=0; v<n_vars; v++)
1917  if ((v == vn) || (vn == libMesh::invalid_uint))
1918  {
1919  if(this->variable(v).type().family == SCALAR)
1920  {
1921  // We asked for this variable, so add it to the vector.
1922  SCALAR_var_numbers.push_back(v);
1923 
1924 #ifdef DEBUG
1925  FEType fe_type = this->variable_type(v);
1926  tot_size += FEInterface::n_dofs(dim,
1927  fe_type,
1928  type);
1929 #endif
1930  }
1931  else
1932  if (this->variable(v).active_on_subdomain(elem->subdomain_id()))
1933  { // Do this for all the variables if one was not specified
1934  // or just for the specified variable
1935 
1936  // Increase the polynomial order on p refined elements,
1937  // but make sure you get the right polynomial order for
1938  // the OLD degrees of freedom
1939  int p_adjustment = 0;
1940  if (elem->p_refinement_flag() == Elem::JUST_REFINED)
1941  {
1942  libmesh_assert_greater (elem->p_level(), 0);
1943  p_adjustment = -1;
1944  }
1945  else if (elem->p_refinement_flag() == Elem::JUST_COARSENED)
1946  {
1947  p_adjustment = 1;
1948  }
1949  FEType fe_type = this->variable_type(v);
1950  fe_type.order = static_cast<Order>(fe_type.order +
1951  elem->p_level() +
1952  p_adjustment);
1953 
1954  const bool extra_hanging_dofs =
1956 
1957 #ifdef DEBUG
1958  tot_size += FEInterface::n_dofs(dim,
1959  fe_type,
1960  type);
1961 #endif
1962 
1963  // Get the node-based DOF numbers
1964  for (unsigned int n=0; n<n_nodes; n++)
1965  {
1966  const Node* node = elem->get_node(n);
1967 
1968  // There is a potential problem with h refinement. Imagine a
1969  // quad9 that has a linear FE on it. Then, on the hanging side,
1970  // it can falsely identify a DOF at the mid-edge node. This is why
1971  // we call FEInterface instead of node->n_comp() directly.
1972  const unsigned int nc = FEInterface::n_dofs_at_node (dim,
1973  fe_type,
1974  type,
1975  n);
1977 
1978  // If this is a non-vertex on a hanging node with extra
1979  // degrees of freedom, we use the non-vertex dofs (which
1980  // come in reverse order starting from the end, to
1981  // simplify p refinement)
1982  if (extra_hanging_dofs && !elem->is_vertex(n))
1983  {
1984  const int dof_offset =
1985  node->old_dof_object->n_comp(sys_num,v) - nc;
1986 
1987  // We should never have fewer dofs than necessary on a
1988  // node unless we're getting indices on a parent element
1989  // or a just-coarsened element
1990  if (dof_offset < 0)
1991  {
1992  libmesh_assert(!elem->active() || elem->refinement_flag() ==
1994  di.resize(di.size() + nc, DofObject::invalid_id);
1995  }
1996  else
1997  for (int i=node->old_dof_object->n_comp(sys_num,v)-1;
1998  i>=dof_offset; i--)
1999  {
2000  libmesh_assert_not_equal_to (node->old_dof_object->dof_number(sys_num,v,i),
2002  di.push_back(node->old_dof_object->dof_number(sys_num,v,i));
2003  }
2004  }
2005  // If this is a vertex or an element without extra hanging
2006  // dofs, our dofs come in forward order coming from the
2007  // beginning
2008  else
2009  for (unsigned int i=0; i<nc; i++)
2010  {
2011  libmesh_assert_not_equal_to (node->old_dof_object->dof_number(sys_num,v,i),
2013  di.push_back(node->old_dof_object->dof_number(sys_num,v,i));
2014  }
2015  }
2016 
2017  // If there are any element-based DOF numbers, get them
2018  const unsigned int nc = FEInterface::n_dofs_per_elem(dim,
2019  fe_type,
2020  type);
2021 
2022  // We should never have fewer dofs than necessary on an
2023  // element unless we're getting indices on a parent element
2024  // or a just-coarsened element
2025  if (nc != 0)
2026  {
2027  if (elem->old_dof_object->n_systems() > sys_num &&
2028  nc <= elem->old_dof_object->n_comp(sys_num,v))
2029  {
2031 
2032  for (unsigned int i=0; i<nc; i++)
2033  {
2034  libmesh_assert_not_equal_to (elem->old_dof_object->dof_number(sys_num,v,i),
2036 
2037  di.push_back(elem->old_dof_object->dof_number(sys_num,v,i));
2038  }
2039  }
2040  else
2041  {
2042  libmesh_assert(!elem->active() || fe_type.family == LAGRANGE ||
2044  di.resize(di.size() + nc, DofObject::invalid_id);
2045  }
2046  }
2047  }
2048  } // end loop over variables
2049 
2050  // Finally append any SCALAR dofs that we asked for.
2051  std::vector<dof_id_type> di_new;
2052  std::vector<unsigned int>::iterator it = SCALAR_var_numbers.begin();
2053  std::vector<unsigned int>::const_iterator it_end = SCALAR_var_numbers.end();
2054  for( ; it != it_end; ++it)
2055  {
2056  this->SCALAR_dof_indices(di_new,*it,true);
2057  di.insert( di.end(), di_new.begin(), di_new.end());
2058  }
2059 
2060 #ifdef DEBUG
2061  libmesh_assert_equal_to (tot_size, di.size());
2062 #endif
2063 
2064  STOP_LOG("old_dof_indices()", "DofMap");
2065 }
2066 
2067 
2068 
2069 /*
2070 void DofMap::augment_send_list_for_projection(const MeshBase& mesh)
2071 {
2072  // Loop over the active local elements in the mesh.
2073  // If the element was just refined and its parent lives
2074  // on a different processor then we need to augment the
2075  // _send_list with the parent's DOF indices so that we
2076  // can access the parent's data for computing solution
2077  // projections, etc...
2078 
2079  // The DOF indices for the parent
2080  std::vector<dof_id_type> di;
2081 
2082  // Flag telling us if we need to re-sort the send_list.
2083  // Note we won't need to re-sort unless a child with a
2084  // parent belonging to a different processor is found
2085  bool needs_sorting = false;
2086 
2087 
2088  MeshBase::const_element_iterator elem_it = mesh.active_local_elements_begin();
2089  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();
2090 
2091  for ( ; elem_it != elem_end; ++elem_it)
2092  {
2093  const Elem* const elem = *elem_it;
2094 
2095  // We only need to consider the children that
2096  // were just refined
2097  if (elem->refinement_flag() != Elem::JUST_REFINED) continue;
2098 
2099  const Elem* const parent = elem->parent();
2100 
2101  // If the parent lives on another processor
2102  // than the child
2103  if (parent != NULL)
2104  if (parent->processor_id() != elem->processor_id())
2105  {
2106  // Get the DOF indices for the parent
2107  this->dof_indices (parent, di);
2108 
2109  // Insert the DOF indices into the send list
2110  _send_list.insert (_send_list.end(),
2111  di.begin(), di.end());
2112 
2113  // We will need to re-sort the send list
2114  needs_sorting = true;
2115  }
2116  }
2117 
2118  // The send-list might need to be sorted again.
2119  if (needs_sorting) this->sort_send_list ();
2120 }
2121 */
2122 
2123 #endif // LIBMESH_ENABLE_AMR
2124 
2125 
2126 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2127 
2128 void DofMap::find_connected_dofs (std::vector<dof_id_type>& elem_dofs) const
2129 {
2130  typedef std::set<dof_id_type> RCSet;
2131 
2132  // First insert the DOFS we already depend on into the set.
2133  RCSet dof_set (elem_dofs.begin(), elem_dofs.end());
2134 
2135  bool done = true;
2136 
2137  // Next insert any dofs those might be constrained in terms
2138  // of. Note that in this case we may not be done: Those may
2139  // in turn depend on others. So, we need to repeat this process
2140  // in that case until the system depends only on unconstrained
2141  // degrees of freedom.
2142  for (unsigned int i=0; i<elem_dofs.size(); i++)
2143  if (this->is_constrained_dof(elem_dofs[i]))
2144  {
2145  // If the DOF is constrained
2146  DofConstraints::const_iterator
2147  pos = _dof_constraints.find(elem_dofs[i]);
2148 
2149  libmesh_assert (pos != _dof_constraints.end());
2150 
2151  const DofConstraintRow& constraint_row = pos->second;
2152 
2153 // adaptive p refinement currently gives us lots of empty constraint
2154 // rows - we should optimize those DoFs away in the future. [RHS]
2155 // libmesh_assert (!constraint_row.empty());
2156 
2157  DofConstraintRow::const_iterator it = constraint_row.begin();
2158  DofConstraintRow::const_iterator it_end = constraint_row.end();
2159 
2160 
2161  // Add the DOFs this dof is constrained in terms of.
2162  // note that these dofs might also be constrained, so
2163  // we will need to call this function recursively.
2164  for ( ; it != it_end; ++it)
2165  if (!dof_set.count (it->first))
2166  {
2167  dof_set.insert (it->first);
2168  done = false;
2169  }
2170  }
2171 
2172 
2173  // If not done then we need to do more work
2174  // (obviously :-) )!
2175  if (!done)
2176  {
2177  // Fill the vector with the contents of the set
2178  elem_dofs.clear();
2179  elem_dofs.insert (elem_dofs.end(),
2180  dof_set.begin(), dof_set.end());
2181 
2182 
2183  // May need to do this recursively. It is possible
2184  // that we just replaced a constrained DOF with another
2185  // constrained DOF.
2186  this->find_connected_dofs (elem_dofs);
2187 
2188  } // end if (!done)
2189 }
2190 
2191 #endif // LIBMESH_ENABLE_CONSTRAINTS
2192 
2193 
2194 
2195 #if defined(__GNUC__) && (__GNUC__ < 4) && !defined(__INTEL_COMPILER)
2196 
2198 {
2199 }
2200 
2201 #endif
2202 
2203 
2204 
2206 {
2207  // Compute the sparsity structure of the global matrix. This can be
2208  // fed into a PetscMatrix to allocate exacly the number of nonzeros
2209  // necessary to store the matrix. This algorithm should be linear
2210  // in the (# of elements)*(# nodes per element)
2211  const processor_id_type proc_id = mesh.processor_id();
2212  const dof_id_type n_dofs_on_proc = dof_map.n_dofs_on_processor(proc_id);
2213  const dof_id_type first_dof_on_proc = dof_map.first_dof(proc_id);
2214  const dof_id_type end_dof_on_proc = dof_map.end_dof(proc_id);
2215 
2216  sparsity_pattern.resize(n_dofs_on_proc);
2217 
2218  // If the user did not explicitly specify the DOF coupling
2219  // then all the DOFS are coupled to each other. Furthermore,
2220  // we can take a shortcut and do this more quickly here. So
2221  // we use an if-test.
2222  if ((dof_coupling == NULL) || (dof_coupling->empty()))
2223  {
2224  std::vector<dof_id_type>
2225  element_dofs,
2226  neighbor_dofs,
2227  dofs_to_add;
2228 
2229  std::vector<const Elem*> active_neighbors;
2230 
2231  for (ConstElemRange::const_iterator elem_it = range.begin() ; elem_it != range.end(); ++elem_it)
2232  {
2233  const Elem* const elem = *elem_it;
2234 
2235  // Get the global indices of the DOFs with support on this element
2236  dof_map.dof_indices (elem, element_dofs);
2237 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2238  dof_map.find_connected_dofs (element_dofs);
2239 #endif
2240 
2241  // We can be more efficient if we sort the element DOFs
2242  // into increasing order
2243  std::sort(element_dofs.begin(), element_dofs.end());
2244 
2245  const unsigned int n_dofs_on_element =
2246  libmesh_cast_int<unsigned int>(element_dofs.size());
2247 
2248  for (unsigned int i=0; i<n_dofs_on_element; i++)
2249  {
2250  const dof_id_type ig = element_dofs[i];
2251 
2252  SparsityPattern::Row *row;
2253 
2254  // We save non-local row components for now so we can
2255  // communicate them to other processors later.
2256 
2257  if ((ig >= first_dof_on_proc) &&
2258  (ig < end_dof_on_proc))
2259  {
2260  // This is what I mean
2261  // libmesh_assert_greater_equal ((ig - first_dof_on_proc), 0);
2262  // but do the test like this because ig and
2263  // first_dof_on_proc are unsigned ints
2264  libmesh_assert_greater_equal (ig, first_dof_on_proc);
2265  libmesh_assert_less ((ig - first_dof_on_proc), sparsity_pattern.size());
2266 
2267  row = &sparsity_pattern[ig - first_dof_on_proc];
2268  }
2269  else
2270  {
2271  row = &nonlocal_pattern[ig];
2272  }
2273 
2274  // If the row is empty we will add *all* the element DOFs,
2275  // so just do that.
2276  if (row->empty())
2277  {
2278  row->insert(row->end(),
2279  element_dofs.begin(),
2280  element_dofs.end());
2281  }
2282  else
2283  {
2284  // Build a list of the DOF indices not found in the
2285  // sparsity pattern
2286  dofs_to_add.clear();
2287 
2288  // Cache iterators. Low will move forward, subsequent
2289  // searches will be on smaller ranges
2290  SparsityPattern::Row::iterator
2291  low = std::lower_bound (row->begin(), row->end(), element_dofs.front()),
2292  high = std::upper_bound (low, row->end(), element_dofs.back());
2293 
2294  for (unsigned int j=0; j<n_dofs_on_element; j++)
2295  {
2296  const dof_id_type jg = element_dofs[j];
2297 
2298  // See if jg is in the sorted range
2299  std::pair<SparsityPattern::Row::iterator,
2300  SparsityPattern::Row::iterator>
2301  pos = std::equal_range (low, high, jg);
2302 
2303  // Must add jg if it wasn't found
2304  if (pos.first == pos.second)
2305  dofs_to_add.push_back(jg);
2306 
2307  // pos.first is now a valid lower bound for any
2308  // remaining element DOFs. (That's why we sorted them.)
2309  // Use it for the next search
2310  low = pos.first;
2311  }
2312 
2313  // Add to the sparsity pattern
2314  if (!dofs_to_add.empty())
2315  {
2316  const std::size_t old_size = row->size();
2317 
2318  row->insert (row->end(),
2319  dofs_to_add.begin(),
2320  dofs_to_add.end());
2321 
2323  (row->begin(), row->begin()+old_size, row->end());
2324  }
2325  }
2326 
2327  // Now (possibly) add dofs from neighboring elements
2328  // TODO:[BSK] optimize this like above!
2330  for (unsigned int s=0; s<elem->n_sides(); s++)
2331  if (elem->neighbor(s) != NULL)
2332  {
2333  const Elem* const neighbor_0 = elem->neighbor(s);
2334 #ifdef LIBMESH_ENABLE_AMR
2335  neighbor_0->active_family_tree_by_neighbor(active_neighbors,elem);
2336 #else
2337  active_neighbors.clear();
2338  active_neighbors.push_back(neighbor_0);
2339 #endif
2340 
2341  for (std::size_t a=0; a != active_neighbors.size(); ++a)
2342  {
2343  const Elem *neighbor = active_neighbors[a];
2344 
2345  dof_map.dof_indices (neighbor, neighbor_dofs);
2346 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2347  dof_map.find_connected_dofs (neighbor_dofs);
2348 #endif
2349  const std::size_t n_dofs_on_neighbor = neighbor_dofs.size();
2350 
2351  for (std::size_t j=0; j<n_dofs_on_neighbor; j++)
2352  {
2353  const dof_id_type jg = neighbor_dofs[j];
2354 
2355  // See if jg is in the sorted range
2356  std::pair<SparsityPattern::Row::iterator,
2357  SparsityPattern::Row::iterator>
2358  pos = std::equal_range (row->begin(), row->end(), jg);
2359 
2360  // Insert jg if it wasn't found
2361  if (pos.first == pos.second)
2362  row->insert (pos.first, jg);
2363  }
2364  }
2365  }
2366  }
2367  }
2368  }
2369 
2370  // This is what we do in the case that the user has specified
2371  // explicit DOF coupling.
2372  else
2373  {
2375  libmesh_assert_equal_to (dof_coupling->size(),
2376  dof_map.n_variables());
2377 
2378  const unsigned int n_var = dof_map.n_variables();
2379 
2380  std::vector<dof_id_type>
2381  element_dofs_i,
2382  element_dofs_j,
2383  neighbor_dofs,
2384  dofs_to_add;
2385 
2386 
2387  std::vector<const Elem*> active_neighbors;
2388  for (ConstElemRange::const_iterator elem_it = range.begin() ; elem_it != range.end(); ++elem_it)
2389  for (unsigned int vi=0; vi<n_var; vi++)
2390  {
2391  const Elem* const elem = *elem_it;
2392 
2393  // Find element dofs for variable vi
2394  dof_map.dof_indices (elem, element_dofs_i, vi);
2395 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2396  dof_map.find_connected_dofs (element_dofs_i);
2397 #endif
2398 
2399  // We can be more efficient if we sort the element DOFs
2400  // into increasing order
2401  std::sort(element_dofs_i.begin(), element_dofs_i.end());
2402  const unsigned int n_dofs_on_element_i =
2403  libmesh_cast_int<unsigned int>(element_dofs_i.size());
2404 
2405  for (unsigned int vj=0; vj<n_var; vj++)
2406  if ((*dof_coupling)(vi,vj)) // If vi couples to vj
2407  {
2408  // Find element dofs for variable vj, note that
2409  // if vi==vj we already have the dofs.
2410  if (vi != vj)
2411  {
2412  dof_map.dof_indices (elem, element_dofs_j, vj);
2413 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2414  dof_map.find_connected_dofs (element_dofs_j);
2415 #endif
2416 
2417  // We can be more efficient if we sort the element DOFs
2418  // into increasing order
2419  std::sort (element_dofs_j.begin(), element_dofs_j.end());
2420  }
2421  else
2422  element_dofs_j = element_dofs_i;
2423 
2424  const unsigned int n_dofs_on_element_j =
2425  libmesh_cast_int<unsigned int>(element_dofs_j.size());
2426 
2427  // there might be 0 dofs for the other variable on the same element (when subdomain variables do not overlap) and that's when we do not do anything
2428  if (n_dofs_on_element_j > 0)
2429  {
2430  for (unsigned int i=0; i<n_dofs_on_element_i; i++)
2431  {
2432  const dof_id_type ig = element_dofs_i[i];
2433 
2434  SparsityPattern::Row *row;
2435 
2436  // We save non-local row components for now so we can
2437  // communicate them to other processors later.
2438 
2439  if ((ig >= first_dof_on_proc) &&
2440  (ig < end_dof_on_proc))
2441  {
2442  // This is what I mean
2443  // libmesh_assert_greater_equal ((ig - first_dof_on_proc), 0);
2444  // but do the test like this because ig and
2445  // first_dof_on_proc are unsigned ints
2446  libmesh_assert_greater_equal (ig, first_dof_on_proc);
2447  libmesh_assert_less (ig, (sparsity_pattern.size() +
2448  first_dof_on_proc));
2449 
2450  row = &sparsity_pattern[ig - first_dof_on_proc];
2451  }
2452  else
2453  {
2454  row = &nonlocal_pattern[ig];
2455  }
2456 
2457  // If the row is empty we will add *all* the element j DOFs,
2458  // so just do that.
2459  if (row->empty())
2460  {
2461  row->insert(row->end(),
2462  element_dofs_j.begin(),
2463  element_dofs_j.end());
2464  }
2465  else
2466  {
2467  // Build a list of the DOF indices not found in the
2468  // sparsity pattern
2469  dofs_to_add.clear();
2470 
2471  // Cache iterators. Low will move forward, subsequent
2472  // searches will be on smaller ranges
2473  SparsityPattern::Row::iterator
2474  low = std::lower_bound
2475  (row->begin(), row->end(), element_dofs_j.front()),
2476  high = std::upper_bound
2477  (low, row->end(), element_dofs_j.back());
2478 
2479  for (unsigned int j=0; j<n_dofs_on_element_j; j++)
2480  {
2481  const dof_id_type jg = element_dofs_j[j];
2482 
2483  // See if jg is in the sorted range
2484  std::pair<SparsityPattern::Row::iterator,
2485  SparsityPattern::Row::iterator>
2486  pos = std::equal_range (low, high, jg);
2487 
2488  // Must add jg if it wasn't found
2489  if (pos.first == pos.second)
2490  dofs_to_add.push_back(jg);
2491 
2492  // pos.first is now a valid lower bound for any
2493  // remaining element j DOFs. (That's why we sorted them.)
2494  // Use it for the next search
2495  low = pos.first;
2496  }
2497 
2498  // Add to the sparsity pattern
2499  if (!dofs_to_add.empty())
2500  {
2501  const std::size_t old_size = row->size();
2502 
2503  row->insert (row->end(),
2504  dofs_to_add.begin(),
2505  dofs_to_add.end());
2506 
2508  (row->begin(), row->begin()+old_size,
2509  row->end());
2510  }
2511  }
2512  // Now (possibly) add dofs from neighboring elements
2513  // TODO:[BSK] optimize this like above!
2515  for (unsigned int s=0; s<elem->n_sides(); s++)
2516  if (elem->neighbor(s) != NULL)
2517  {
2518  const Elem* const neighbor_0 = elem->neighbor(s);
2519 #ifdef LIBMESH_ENABLE_AMR
2520  neighbor_0->active_family_tree_by_neighbor(active_neighbors,elem);
2521 #else
2522  active_neighbors.clear();
2523  active_neighbors.push_back(neighbor_0);
2524 #endif
2525 
2526  for (std::size_t a=0; a != active_neighbors.size(); ++a)
2527  {
2528  const Elem *neighbor = active_neighbors[a];
2529 
2530  dof_map.dof_indices (neighbor, neighbor_dofs);
2531 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2532  dof_map.find_connected_dofs (neighbor_dofs);
2533 #endif
2534  const unsigned int n_dofs_on_neighbor =
2535  libmesh_cast_int<unsigned int>(neighbor_dofs.size());
2536 
2537  for (unsigned int j=0; j<n_dofs_on_neighbor; j++)
2538  {
2539  const dof_id_type jg = neighbor_dofs[j];
2540 
2541  // See if jg is in the sorted range
2542  std::pair<SparsityPattern::Row::iterator,
2543  SparsityPattern::Row::iterator>
2544  pos = std::equal_range (row->begin(), row->end(), jg);
2545 
2546  // Insert jg if it wasn't found
2547  if (pos.first == pos.second)
2548  row->insert (pos.first, jg);
2549  }
2550  }
2551  }
2552  }
2553  }
2554  }
2555  }
2556  }
2557 
2558  // Now a new chunk of sparsity structure is built for all of the
2559  // DOFs connected to our rows of the matrix.
2560 
2561  // If we're building a full sparsity pattern, then we've got
2562  // complete rows to work with, so we can just count them from
2563  // scratch.
2565  {
2566  n_nz.clear();
2567  n_oz.clear();
2568  }
2569 
2570  n_nz.resize (n_dofs_on_proc, 0);
2571  n_oz.resize (n_dofs_on_proc, 0);
2572 
2573  for (dof_id_type i=0; i<n_dofs_on_proc; i++)
2574  {
2575  // Get the row of the sparsity pattern
2577 
2578  for (dof_id_type j=0; j<row.size(); j++)
2579  if ((row[j] < first_dof_on_proc) || (row[j] >= end_dof_on_proc))
2580  n_oz[i]++;
2581  else
2582  n_nz[i]++;
2583 
2584  // If we're not building a full sparsity pattern, then we want
2585  // to avoid overcounting these entries as much as possible.
2587  row.clear();
2588  }
2589 }
2590 
2591 
2592 
2594 {
2595  const processor_id_type proc_id = mesh.processor_id();
2596  const dof_id_type n_global_dofs = dof_map.n_dofs();
2597  const dof_id_type n_dofs_on_proc = dof_map.n_dofs_on_processor(proc_id);
2598  const dof_id_type first_dof_on_proc = dof_map.first_dof(proc_id);
2599  const dof_id_type end_dof_on_proc = dof_map.end_dof(proc_id);
2600 
2601  libmesh_assert_equal_to (sparsity_pattern.size(), other.sparsity_pattern.size());
2602  libmesh_assert_equal_to (n_nz.size(), sparsity_pattern.size());
2603  libmesh_assert_equal_to (n_oz.size(), sparsity_pattern.size());
2604 
2605  for (dof_id_type r=0; r<n_dofs_on_proc; r++)
2606  {
2607  // increment the number of on and off-processor nonzeros in this row
2608  // (note this will be an upper bound unless we need the full sparsity pattern)
2609  if (need_full_sparsity_pattern)
2610  {
2611  SparsityPattern::Row &my_row = sparsity_pattern[r];
2612  const SparsityPattern::Row &their_row = other.sparsity_pattern[r];
2613 
2614  // simple copy if I have no dofs
2615  if (my_row.empty())
2616  my_row = their_row;
2617 
2618  // otherwise add their DOFs to mine, resort, and re-unique the row
2619  else if (!their_row.empty()) // do nothing for the trivial case where
2620  { // their row is empty
2621  my_row.insert (my_row.end(),
2622  their_row.begin(),
2623  their_row.end());
2624 
2625  // We cannot use SparsityPattern::sort_row() here because it expects
2626  // the [begin,middle) [middle,end) to be non-overlapping. This is not
2627  // necessarily the case here, so use std::sort()
2628  std::sort (my_row.begin(), my_row.end());
2629 
2630  my_row.erase(std::unique (my_row.begin(), my_row.end()), my_row.end());
2631  }
2632 
2633  // fix the number of on and off-processor nonzeros in this row
2634  n_nz[r] = n_oz[r] = 0;
2635 
2636  for (std::size_t j=0; j<my_row.size(); j++)
2637  if ((my_row[j] < first_dof_on_proc) || (my_row[j] >= end_dof_on_proc))
2638  n_oz[r]++;
2639  else
2640  n_nz[r]++;
2641  }
2642  else
2643  {
2644  n_nz[r] += other.n_nz[r];
2645  n_nz[r] = std::min(n_nz[r], n_dofs_on_proc);
2646  n_oz[r] += other.n_oz[r];
2647  n_oz[r] =std::min(n_oz[r], static_cast<dof_id_type>(n_global_dofs-n_nz[r]));
2648  }
2649  }
2650 
2651  // Move nonlocal row information to ourselves; the other thread
2652  // won't need it in the map after that.
2653  NonlocalGraph::const_iterator it = other.nonlocal_pattern.begin();
2654  for (; it != other.nonlocal_pattern.end(); ++it)
2655  {
2656  const dof_id_type dof_id = it->first;
2657 
2658 #ifndef NDEBUG
2659  processor_id_type dbg_proc_id = 0;
2660  while (dof_id >= dof_map.end_dof(dbg_proc_id))
2661  dbg_proc_id++;
2662  libmesh_assert (dbg_proc_id != this->processor_id());
2663 #endif
2664 
2665  const SparsityPattern::Row &their_row = it->second;
2666 
2667  // We should have no empty values in a map
2668  libmesh_assert (!their_row.empty());
2669 
2670  NonlocalGraph::iterator my_it = nonlocal_pattern.find(it->first);
2671  if (my_it == nonlocal_pattern.end())
2672  {
2673 // nonlocal_pattern[it->first].swap(their_row);
2674  nonlocal_pattern[it->first] = their_row;
2675  }
2676  else
2677  {
2678  SparsityPattern::Row &my_row = my_it->second;
2679 
2680  my_row.insert (my_row.end(),
2681  their_row.begin(),
2682  their_row.end());
2683 
2684  // We cannot use SparsityPattern::sort_row() here because it expects
2685  // the [begin,middle) [middle,end) to be non-overlapping. This is not
2686  // necessarily the case here, so use std::sort()
2687  std::sort (my_row.begin(), my_row.end());
2688 
2689  my_row.erase(std::unique (my_row.begin(), my_row.end()), my_row.end());
2690  }
2691  }
2692 }
2693 
2694 
2695 
2697 {
2698  parallel_object_only();
2699  this->comm().verify(need_full_sparsity_pattern);
2700 
2701  const dof_id_type n_global_dofs = dof_map.n_dofs();
2702  const dof_id_type n_dofs_on_proc = dof_map.n_dofs_on_processor(this->processor_id());
2703  const dof_id_type local_first_dof = dof_map.first_dof();
2704  const dof_id_type local_end_dof = dof_map.end_dof();
2705 
2706  // Trade sparsity rows with other processors
2707  for (processor_id_type p=1; p != this->n_processors(); ++p)
2708  {
2709  // Push to processor procup while receiving from procdown
2710  processor_id_type procup = (this->processor_id() + p) %
2711  this->n_processors();
2712  processor_id_type procdown = (this->n_processors() +
2713  this->processor_id() - p) %
2714  this->n_processors();
2715 
2716  // Pack the sparsity pattern rows to push to procup
2717  std::vector<dof_id_type> pushed_row_ids,
2718  pushed_row_ids_to_me;
2719  std::vector<std::vector<dof_id_type> > pushed_rows,
2720  pushed_rows_to_me;
2721 
2722  // Move nonlocal row information to a structure to send it from;
2723  // we don't need it in the map after that.
2724  NonlocalGraph::iterator it = nonlocal_pattern.begin();
2725  while (it != nonlocal_pattern.end())
2726  {
2727  const dof_id_type dof_id = it->first;
2728  processor_id_type proc_id = 0;
2729  while (dof_id >= dof_map.end_dof(proc_id))
2730  proc_id++;
2731 
2732  libmesh_assert (proc_id != this->processor_id());
2733 
2734  if (proc_id == procup)
2735  {
2736  pushed_row_ids.push_back(dof_id);
2737 
2738  // We can't just do the swap trick here, thanks to the
2739  // differing vector allocators?
2740  pushed_rows.push_back(std::vector<dof_id_type>());
2741  pushed_rows.back().assign
2742  (it->second.begin(), it->second.end());
2743 
2744  nonlocal_pattern.erase(it++);
2745  }
2746  else
2747  ++it;
2748  }
2749 
2750  this->comm().send_receive(procup, pushed_row_ids,
2751  procdown, pushed_row_ids_to_me);
2752  this->comm().send_receive(procup, pushed_rows,
2753  procdown, pushed_rows_to_me);
2754  pushed_row_ids.clear();
2755  pushed_rows.clear();
2756 
2757  const std::size_t n_rows = pushed_row_ids_to_me.size();
2758  for (std::size_t i=0; i != n_rows; ++i)
2759  {
2760  const dof_id_type r = pushed_row_ids_to_me[i];
2761  const dof_id_type my_r = r - local_first_dof;
2762 
2763  std::vector<dof_id_type> &their_row = pushed_rows_to_me[i];
2764 
2765  if (need_full_sparsity_pattern)
2766  {
2767  SparsityPattern::Row &my_row =
2768  sparsity_pattern[my_r];
2769 
2770  // They wouldn't have sent an empty row
2771  libmesh_assert(!their_row.empty());
2772 
2773  // We can end up with an empty row on a dof that touches our
2774  // inactive elements but not our active ones
2775  if (my_row.empty())
2776  {
2777  my_row.assign (their_row.begin(),
2778  their_row.end());
2779  }
2780  else
2781  {
2782  my_row.insert (my_row.end(),
2783  their_row.begin(),
2784  their_row.end());
2785 
2786  // We cannot use SparsityPattern::sort_row() here because it expects
2787  // the [begin,middle) [middle,end) to be non-overlapping. This is not
2788  // necessarily the case here, so use std::sort()
2789  std::sort (my_row.begin(), my_row.end());
2790 
2791  my_row.erase(std::unique (my_row.begin(), my_row.end()), my_row.end());
2792  }
2793 
2794  // fix the number of on and off-processor nonzeros in this row
2795  n_nz[my_r] = n_oz[my_r] = 0;
2796 
2797  for (std::size_t j=0; j<my_row.size(); j++)
2798  if ((my_row[j] < local_first_dof) || (my_row[j] >= local_end_dof))
2799  n_oz[my_r]++;
2800  else
2801  n_nz[my_r]++;
2802  }
2803  else
2804  {
2805  for (std::size_t j=0; j<their_row.size(); j++)
2806  if ((their_row[j] < local_first_dof) || (their_row[j] >= local_end_dof))
2807  n_oz[my_r]++;
2808  else
2809  n_nz[my_r]++;
2810 
2811  n_nz[my_r] = std::min(n_nz[my_r], n_dofs_on_proc);
2812  n_oz[my_r] = std::min(n_oz[my_r],
2813  static_cast<dof_id_type>(n_global_dofs-n_nz[my_r]));
2814  }
2815  }
2816  }
2817 
2818  // We should have sent everything at this point.
2819  libmesh_assert (nonlocal_pattern.empty());
2820 }
2821 
2822 
2823 
2824 void DofMap::print_info(std::ostream& os) const
2825 {
2826  os << this->get_info();
2827 }
2828 
2829 
2830 
2831 std::string DofMap::get_info() const
2832 {
2833  std::ostringstream os;
2834 
2835  // If we didn't calculate the exact sparsity pattern, the threaded
2836  // sparsity pattern assembly may have just given us an upper bound
2837  // on sparsity.
2838  const char* may_equal = " <= ";
2839 
2840  // If we calculated the exact sparsity pattern, then we can report
2841  // exact bandwidth figures:
2842  std::vector<SparseMatrix<Number>* >::const_iterator
2843  pos = _matrices.begin(),
2844  end = _matrices.end();
2845 
2846  for (; pos != end; ++pos)
2847  if ((*pos)->need_full_sparsity_pattern())
2848  may_equal = " = ";
2849 
2850  dof_id_type max_n_nz = 0, max_n_oz = 0;
2851  long double avg_n_nz = 0, avg_n_oz = 0;
2852 
2853  if (_n_nz)
2854  {
2855  for (std::size_t i = 0; i != _n_nz->size(); ++i)
2856  {
2857  max_n_nz = std::max(max_n_nz, (*_n_nz)[i]);
2858  avg_n_nz += (*_n_nz)[i];
2859  }
2860 
2861  std::size_t n_nz_size = _n_nz->size();
2862 
2863  this->comm().max(max_n_nz);
2864  this->comm().sum(avg_n_nz);
2865  this->comm().sum(n_nz_size);
2866 
2867  avg_n_nz /= std::max(n_nz_size,std::size_t(1));
2868 
2869  libmesh_assert(_n_oz);
2870 
2871  for (std::size_t i = 0; i != (*_n_oz).size(); ++i)
2872  {
2873  max_n_oz = std::max(max_n_oz, (*_n_oz)[i]);
2874  avg_n_oz += (*_n_oz)[i];
2875  }
2876 
2877  std::size_t n_oz_size = _n_oz->size();
2878 
2879  this->comm().max(max_n_oz);
2880  this->comm().sum(avg_n_oz);
2881  this->comm().sum(n_oz_size);
2882 
2883  avg_n_oz /= std::max(n_oz_size,std::size_t(1));
2884  }
2885 
2886  os << " DofMap Sparsity\n Average On-Processor Bandwidth"
2887  << may_equal << avg_n_nz << '\n';
2888 
2889  os << " Average Off-Processor Bandwidth"
2890  << may_equal << avg_n_oz << '\n';
2891 
2892  os << " Maximum On-Processor Bandwidth"
2893  << may_equal << max_n_nz << '\n';
2894 
2895  os << " Maximum Off-Processor Bandwidth"
2896  << may_equal << max_n_oz << std::endl;
2897 
2898 #ifdef LIBMESH_ENABLE_CONSTRAINTS
2899 
2900  std::size_t n_constraints = 0, max_constraint_length = 0,
2901  n_rhss = 0;
2902  long double avg_constraint_length = 0.;
2903 
2904  for (DofConstraints::const_iterator it=_dof_constraints.begin();
2905  it != _dof_constraints.end(); ++it)
2906  {
2907  // Only count local constraints, then sum later
2908  const dof_id_type constrained_dof = it->first;
2909  if (constrained_dof < this->first_dof() ||
2910  constrained_dof >= this->end_dof())
2911  continue;
2912 
2913  const DofConstraintRow& row = it->second;
2914  std::size_t rowsize = row.size();
2915 
2916  max_constraint_length = std::max(max_constraint_length,
2917  rowsize);
2918  avg_constraint_length += rowsize;
2919  n_constraints++;
2920 
2921  if (_primal_constraint_values.count(constrained_dof))
2922  n_rhss++;
2923  }
2924 
2925  this->comm().sum(n_constraints);
2926  this->comm().sum(n_rhss);
2927  this->comm().sum(avg_constraint_length);
2928  this->comm().max(max_constraint_length);
2929 
2930  os << " DofMap Constraints\n Number of DoF Constraints = "
2931  << n_constraints;
2932  if (n_rhss)
2933  os << '\n'
2934  << " Number of Heterogenous Constraints= " << n_rhss;
2935  if (n_constraints)
2936  {
2937  avg_constraint_length /= n_constraints;
2938 
2939  os << '\n'
2940  << " Average DoF Constraint Length= " << avg_constraint_length;
2941  }
2942 
2943 #ifdef LIBMESH_ENABLE_NODE_CONSTRAINTS
2944  std::size_t n_node_constraints = 0, max_node_constraint_length = 0,
2945  n_node_rhss = 0;
2946  long double avg_node_constraint_length = 0.;
2947 
2948  for (NodeConstraints::const_iterator it=_node_constraints.begin();
2949  it != _node_constraints.end(); ++it)
2950  {
2951  // Only count local constraints, then sum later
2952  const Node *node = it->first;
2953  if (node->processor_id() != this->processor_id())
2954  continue;
2955 
2956  const NodeConstraintRow& row = it->second.first;
2957  std::size_t rowsize = row.size();
2958 
2959  max_node_constraint_length = std::max(max_node_constraint_length,
2960  rowsize);
2961  avg_node_constraint_length += rowsize;
2962  n_node_constraints++;
2963 
2964  if (it->second.second != Point(0))
2965  n_node_rhss++;
2966  }
2967 
2968  this->comm().sum(n_node_constraints);
2969  this->comm().sum(n_node_rhss);
2970  this->comm().sum(avg_node_constraint_length);
2971  this->comm().max(max_node_constraint_length);
2972 
2973  os << "\n Number of Node Constraints = " << n_node_constraints;
2974  if (n_node_rhss)
2975  os << '\n'
2976  << " Number of Heterogenous Node Constraints= " << n_node_rhss;
2977  if (n_node_constraints)
2978  {
2979  avg_node_constraint_length /= n_node_constraints;
2980  os << "\n Maximum Node Constraint Length= " << max_node_constraint_length
2981  << '\n'
2982  << " Average Node Constraint Length= " << avg_node_constraint_length;
2983  }
2984 #endif // LIBMESH_ENABLE_NODE_CONSTRAINTS
2985 
2986  os << std::endl;
2987 
2988 #endif // LIBMESH_ENABLE_CONSTRAINTS
2989 
2990  return os.str();
2991 }
2992 
2993 } // namespace libMesh

Site Created By: libMesh Developers
Last modified: February 07 2014 16:57:04 UTC

Hosted By:
SourceForge.net Logo