NodalVoidVolume.C
//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "NodalVoidVolume.h"
#include "Assembly.h"
#include "libmesh/parallel_sync.h"

registerMooseObject("GeochemistryApp", NodalVoidVolume);

InputParameters
NodalVoidVolume::validParams()
{
  InputParameters params = ElementUserObject::validParams();
  params.addCoupledVar("porosity", 1.0, "Porosity");
  params.addCoupledVar(
      "concentration",
      "A concentration variable. This is only used to determine the finite-element type of your "
      "concentration variable. The default is linear Lagrange. Therefore, if you are using "
      "linear-Lagrange variables you do not need to supply this input");
  // All elements that share a node with the elements owned by this processor are known about by
  // this UserObject
  params.addRelationshipManager("ElementPointNeighborLayers",
                                Moose::RelationshipManagerType::GEOMETRIC |
                                    Moose::RelationshipManagerType::ALGEBRAIC |
                                    Moose::RelationshipManagerType::COUPLING,
                                [](const InputParameters &, InputParameters & rm_params)
                                { rm_params.set<unsigned short>("layers") = 1; });
  params.addClassDescription(
      "UserObject to compute the nodal void volume. Take care if you block-restrict this "
      "UserObject, since the volumes of the nodes on the block's boundary will not include any "
      "contributions from outside the block.");
  return params;
}
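
// Usage illustration only (not part of the original file): a minimal sketch of
// an input-file block that might create this UserObject. The object name
// "nodal_void_volume_uo" is hypothetical, and "porosity"/"concentration" refer
// to variables that must exist in the simulation.
//
//   [UserObjects]
//     [nodal_void_volume_uo]
//       type = NodalVoidVolume
//       porosity = porosity
//       concentration = concentration
//     []
//   []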

NodalVoidVolume::NodalVoidVolume(const InputParameters & parameters)
  : ElementUserObject(parameters),
    _porosity(coupledValue("porosity")),
    _rebuilding_needed(true),
    _my_pid(processor_id()),
    _phi(isParamValid("concentration") ? getVar("concentration", 0)->phi()
                                       : _assembly.fePhi<Real>(FEType(1, FEFamily::LAGRANGE)))
{
}

void
NodalVoidVolume::initialSetup()
{
  ElementUserObject::initialSetup();
  if (_rebuilding_needed)
    rebuildStructures(); // reinitialize _nodal_void_volume and rebuild MPI communication lists
}

void
NodalVoidVolume::meshChanged()
{
  ElementUserObject::meshChanged();
  _rebuilding_needed = true; // signal that internal data structures need rebuilding
}

void
NodalVoidVolume::timestepSetup()
{
  ElementUserObject::timestepSetup();
  if (_rebuilding_needed)
    rebuildStructures(); // reinitialize _nodal_void_volume and rebuild MPI communication lists
}

void
NodalVoidVolume::rebuildStructures()
{
  // Because of the RelationshipManager, this processor knows about all its elements as well as 1
  // layer of ghost elements. Hence, the loop over getNonlinearEvaluableElementRange below goes
  // over all the local elements as well as 1 layer of ghost elements, and all their nodes are
  // recorded. The ghosted elements are not visited in execute(), so the nodal volume is
  // incorrectly computed for all the nodes belonging to ghosted elements. So MPI communication of
  // nodal-volume info is needed (implemented in exchangeGhostedInfo).
  _nodal_void_volume.clear();
  for (const auto & elem : _fe_problem.getNonlinearEvaluableElementRange())
    if (this->hasBlocks(elem->subdomain_id()))
      for (const auto & node : elem->node_ref_range())
        _nodal_void_volume[&node] = 0.0;
  if (_app.n_processors() > 1)
    buildCommLists();
  _rebuilding_needed = false;
}
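
// Worked example of the ghosting logic above: if ranks 0 and 1 share a mesh
// boundary and node n lies on it, execute() on each rank adds contributions
// only from that rank's own elements touching n, so each rank holds a partial
// sum for n; exchangeGhostedInfo() then adds the partial sums together, after
// which both ranks hold the full nodal volume.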

void
NodalVoidVolume::buildCommLists()
{
  // global_node_nums_to_receive[pid] is a list of node numbers connected to elements owned by
  // processor pid. _nodes_to_receive[pid] is a list of node pointers. Both lists have the same
  // ordering.
  std::map<processor_id_type, std::vector<dof_id_type>> global_node_nums_to_receive;
  _nodes_to_receive.clear();
  // seen_nodes are those that have been seen to be attached to an element of given processor ID
  std::map<processor_id_type, std::set<const Node *>> seen_nodes;
  // Run through all elements known by this processor (the owned + 1 layer of ghosted elements),
  // recording nodes that are attached to elements that aren't owned by this processor
  for (const auto & elem : _fe_problem.getNonlinearEvaluableElementRange())
    if (this->hasBlocks(elem->subdomain_id()))
    {
      const processor_id_type elem_pid = elem->processor_id();
      if (elem_pid != _my_pid)
      {
        auto & pid_nodes = seen_nodes[elem_pid];
        for (const auto & node : elem->node_ref_range())
          if (pid_nodes.insert(&node).second) // true if the node has not been seen before
          {
            global_node_nums_to_receive[elem_pid].push_back(node.id());
            _nodes_to_receive[elem_pid].push_back(&node);
          }
      }
    }

  // Exchange this info with other processors, building global_node_nums_to_send at the same time
  std::map<processor_id_type, std::vector<dof_id_type>> global_node_nums_to_send;
  auto nodes_action_functor = [&](processor_id_type pid, const std::vector<dof_id_type> & nts)
  { global_node_nums_to_send[pid] = nts; };
  Parallel::push_parallel_vector_data(
      this->comm(), global_node_nums_to_receive, nodes_action_functor);

  // Build _nodes_to_send using global_node_nums_to_send, keeping the same ordering in the
  // std::vector
  _nodes_to_send.clear();
  for (const auto & kv : global_node_nums_to_send)
  {
    const processor_id_type pid = kv.first;
    auto & pid_entry = _nodes_to_send[pid];
    pid_entry.reserve(kv.second.size());
    for (const auto & node_num : kv.second)
      pid_entry.push_back(_mesh.nodePtr(node_num));
  }
}
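
// A minimal, hypothetical sketch (not part of the original file) of the
// Parallel::push_parallel_vector_data pattern used above and in
// exchangeGhostedInfo(): every sender pushes a vector to chosen destination
// ranks, and the action functor runs once on the receiving rank per incoming
// message.
namespace
{
[[maybe_unused]] void
pushParallelExample(const Parallel::Communicator & comm)
{
  // send the single value 1.0 to every other processor
  std::map<processor_id_type, std::vector<Real>> to_send;
  for (processor_id_type p = 0; p < comm.size(); ++p)
    if (p != comm.rank())
      to_send[p] = {1.0};
  auto action = [](processor_id_type /*sender_pid*/, const std::vector<Real> & received)
  { libmesh_ignore(received); /* 'received' is exactly what the sender pushed to this rank */ };
  Parallel::push_parallel_vector_data(comm, to_send, action);
}
} // namespace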

void
NodalVoidVolume::exchangeGhostedInfo()
{
  // Exchange ghosted _nodal_void_volume information with other processors
  std::map<processor_id_type, std::vector<Real>> nvv_to_send;
  for (const auto & kv : _nodes_to_send)
  {
    const processor_id_type pid = kv.first;
    auto & pid_entry = nvv_to_send[pid];
    pid_entry.reserve(kv.second.size());
    for (const auto & nd : kv.second)
      pid_entry.push_back(_nodal_void_volume.at(nd));
  }

  auto nvv_action_functor = [this](processor_id_type pid, const std::vector<Real> & nvv_received)
  {
    const std::size_t msg_size = nvv_received.size();
    auto & receive_pid_entry = _nodes_to_receive[pid];
    mooseAssert(msg_size == receive_pid_entry.size(),
                "Message size, " << msg_size
                                 << ", incompatible with nodes_to_receive, which has size "
                                 << receive_pid_entry.size());
    for (std::size_t i = 0; i < msg_size; ++i)
      _nodal_void_volume[receive_pid_entry[i]] += nvv_received[i];
  };
  Parallel::push_parallel_vector_data(this->comm(), nvv_to_send, nvv_action_functor);
}

void
NodalVoidVolume::initialize()
{
  for (auto & nvv : _nodal_void_volume)
    nvv.second = 0.0;
}

void
NodalVoidVolume::finalize()
{
  if (_app.n_processors() > 1)
    exchangeGhostedInfo();
  // Now _nodal_void_volume is correct for all nodes within and on the boundary of this
  // processor's domain.
}

void
NodalVoidVolume::threadJoin(const UserObject & uo)
{
  // _nodal_void_volume will have been computed by other threads: add their contributions to ours.
  const auto & nvv = static_cast<const NodalVoidVolume &>(uo);
  for (auto & our_nvv : _nodal_void_volume)
    our_nvv.second += nvv._nodal_void_volume.at(our_nvv.first);
  // Now _nodal_void_volume is correct for all nodes within this processor's domain (but
  // potentially not those on the boundary: exchangeGhostedInfo() will fix that)
}

void
NodalVoidVolume::execute()
{
  // This gets called for all elements owned by this processor. So, after threadJoin(), it
  // correctly computes _nodal_void_volume for nodes completely within this processor's domain.
  for (unsigned i = 0; i < _current_elem->n_nodes(); ++i)
  {
    const Node * node = _current_elem->node_ptr(i);
    mooseAssert(_nodal_void_volume.count(node) == 1, "Node missing in _nodal_void_volume map");
    auto & nvv = _nodal_void_volume[node];
    for (unsigned qp = 0; qp < _qrule->n_points(); ++qp)
      nvv += _JxW[qp] * _coord[qp] * _phi[i][qp] * _porosity[qp];
  }
}
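
// In symbols, execute() accumulates, for each node i of each owned element,
//   V_i += sum_qp JxW[qp] * coord[qp] * phi_i(qp) * porosity(qp),
// i.e. V_i = integral of phi_i * porosity over the elements sharing node i.
// Since first-order Lagrange shape functions satisfy sum_i phi_i = 1 at every
// point, the nodal void volumes then sum to the total pore volume of the mesh.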

Real
NodalVoidVolume::getNodalVoidVolume(const Node * node) const
{
  auto find = _nodal_void_volume.find(node);
  if (find != _nodal_void_volume.end())
    return find->second;
  mooseError("nodal id ",
             node->id(),
             " not in NodalVoidVolume's data structures. Perhaps the execute_on parameter of "
             "NodalVoidVolume needs to be set differently");
}
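
// A hedged sketch (the object name "nodal_void_volume_uo" is hypothetical) of
// how a consumer, e.g. a nodal AuxKernel, might query this UserObject via the
// standard MOOSE UserObjectInterface accessor:
//
//   const auto & nvv_uo = getUserObject<NodalVoidVolume>("nodal_void_volume_uo");
//   const Real void_volume = nvv_uo.getNodalVoidVolume(_current_node);
//
// provided the execute_on schedule ensures NodalVoidVolume is finalized first.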