#include "libmesh/parallel_sync.h"

  params.addCoupledVar(
      "concentration",
      "A concentration variable. This is only used to determine the finite-element type of your "
      "concentration variable. The default is linear Lagrange. Therefore, if you are using "
      "linear-Lagrange variables you do not need to supply this input");
  // presumably the tail of the relationship-manager lambda in validParams(),
  // requesting one layer of ghosted elements
  { rm_params.set<unsigned short>("layers") = 1; });
35 "UserObject to compute the nodal void volume. Take care if you block-restrict this " 36 "UserObject, since the volumes of the nodes on the block's boundary will not include any " 37 "contributions from outside the block.");
    _porosity(coupledValue("porosity")),
    _rebuilding_needed(true),
    _my_pid(processor_id()),
    _phi(isParamValid("concentration") ? getVar("concentration", 0)->phi()
         // (else branch not shown here; per the "concentration" documentation above, the
         // shape functions default to linear Lagrange when that variable is not supplied)
  if (this->hasBlocks(elem->subdomain_id()))
    for (const auto & node : elem->node_ref_range())
  std::map<processor_id_type, std::vector<dof_id_type>> global_node_nums_to_receive;

  std::map<processor_id_type, std::set<const Node *>> seen_nodes;

  if (this->hasBlocks(elem->subdomain_id()))

      auto & pid_nodes = seen_nodes[elem_pid];
      for (const auto & node : elem->node_ref_range())
        if (pid_nodes.insert(&node).second) // record each node only once per owning processor
          global_node_nums_to_receive[elem_pid].push_back(node.id());
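The fragments above build the "receive" side of the communication lists: while looping over elements owned by other processors, each node is recorded once per owning processor. A minimal, self-contained sketch of that deduplication pattern, using plain integers in place of libMesh node ids and hypothetical element data:

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <utility>
#include <vector>

// hypothetical stand-ins for libMesh's processor_id_type and dof_id_type
using rank_t = std::uint8_t;
using id_t = std::uint64_t;

int main()
{
  // (element owner rank, node ids of that element), as a ghosted-element loop would visit them
  const std::vector<std::pair<rank_t, std::vector<id_t>>> ghosted_elems = {
      {1, {10, 11, 12}}, {1, {11, 12, 13}}, {2, {13, 14, 15}}};

  std::map<rank_t, std::set<id_t>> seen;              // plays the role of seen_nodes
  std::map<rank_t, std::vector<id_t>> ids_to_receive; // plays the role of global_node_nums_to_receive

  for (const auto & [owner, node_ids] : ghosted_elems)
    for (const id_t n : node_ids)
      if (seen[owner].insert(n).second) // true only the first time this (owner, node) pair is met
        ids_to_receive[owner].push_back(n);

  for (const auto & [owner, ids] : ids_to_receive)
  {
    std::cout << "need data from rank " << unsigned(owner) << " for nodes:";
    for (const id_t n : ids)
      std::cout << ' ' << n;
    std::cout << '\n';
  }
}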
  std::map<processor_id_type, std::vector<dof_id_type>> global_node_nums_to_send;
  auto nodes_action_functor = [&](processor_id_type pid, const std::vector<dof_id_type> & nts)
  { global_node_nums_to_send[pid] = nts; };
  Parallel::push_parallel_vector_data(
      this->comm(), global_node_nums_to_receive, nodes_action_functor);
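Parallel::push_parallel_vector_data ships each rank's requested node-id vector to the rank those ids were keyed on, and the supplied action functor runs on the receiving rank with the arriving vector; here the functor simply stores the request, so this rank later knows which of its nodes each neighbour wants. The sketch below mimics only the functor contract in a single process, delivering the vectors by direct function call instead of over MPI:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

using rank_t = std::uint8_t;
using id_t = std::uint64_t;

int main()
{
  // what this rank wants from each other rank (cf. global_node_nums_to_receive)
  const std::map<rank_t, std::vector<id_t>> to_receive = {{1, {10, 11, 12, 13}}, {2, {13, 14, 15}}};

  // on the destination rank the library invokes an action functor with the arriving vector;
  // cf. nodes_action_functor filling global_node_nums_to_send
  std::map<rank_t, std::vector<id_t>> to_send;
  const auto action = [&](rank_t pid, const std::vector<id_t> & ids) { to_send[pid] = ids; };

  // serial stand-in for the push: deliver each vector to the functor directly
  for (const auto & [pid, ids] : to_receive)
    action(pid, ids);

  std::cout << "ranks that asked us for node data: " << to_send.size() << '\n';
}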
  for (const auto & kv : global_node_nums_to_send)

      pid_entry.reserve(kv.second.size());
      for (const auto & node_num : kv.second)
  std::map<processor_id_type, std::vector<Real>> nvv_to_send;

    auto & pid_entry = nvv_to_send[pid];
    pid_entry.reserve(kv.second.size());
    for (const auto & nd : kv.second)
  auto nvv_action_functor = [this](processor_id_type pid, const std::vector<Real> & nvv_received)

    const std::size_t msg_size = nvv_received.size();

    mooseAssert(msg_size == receive_pid_entry.size(),
                "Message size, " << msg_size
                                 << ", incompatible with nodes_to_receive, which has size "
                                 << receive_pid_entry.size());
    for (std::size_t i = 0; i < msg_size; ++i)

  Parallel::push_parallel_vector_data(this->comm(), nvv_to_send, nvv_action_functor);
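exchangeGhostedInfo uses the same push/functor pattern for the values themselves: the Reals are packed in the order agreed during list building, and on receipt each entry is matched with the node at the same position in the corresponding receive list (that is what the size assertion above protects). A self-contained sketch, assuming the arriving values are accumulated into the local partial sums:

#include <cassert>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main()
{
  // local partial sums at two shared nodes (string names stand in for Node pointers)
  std::unordered_map<std::string, double> nodal_void_volume = {{"n13", 0.4}, {"n14", 0.7}};

  // order agreed with rank 2 during list building: values for these nodes travel in this order
  const std::vector<std::string> nodes_to_receive_from_rank2 = {"n13", "n14"};

  // what rank 2 computed for the same two nodes, packed in the agreed order
  const std::vector<double> received_from_rank2 = {0.25, 0.05};

  // cf. nvv_action_functor: sizes must match, then contribution i belongs to node i
  assert(received_from_rank2.size() == nodes_to_receive_from_rank2.size());
  for (std::size_t i = 0; i < received_from_rank2.size(); ++i)
    nodal_void_volume[nodes_to_receive_from_rank2[i]] += received_from_rank2[i];

  for (const auto & [node, vol] : nodal_void_volume)
    std::cout << node << " -> " << vol << '\n';
}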
      mooseAssert(_nodal_void_volume.count(node) == 1, "Node missing in _nodal_void_volume map");

      for (unsigned qp = 0; qp < _qrule->n_points(); ++qp)
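A small numerical check of the accumulation performed in this quadrature loop, with hypothetical values standing in for _JxW, _coord, _porosity and _phi on a single 1D linear element of length 2 with porosity 0.3; each of the two nodes should end up with half of the element's void volume, i.e. 0.3:

#include <iostream>
#include <vector>

int main()
{
  // two-point Gauss rule on a linear element of length 2 (Jacobian 1, weights 1)
  const std::vector<double> JxW = {1.0, 1.0};      // quadrature weight times |J|
  const std::vector<double> coord = {1.0, 1.0};    // coordinate-system factor (1 in Cartesian)
  const std::vector<double> porosity = {0.3, 0.3}; // coupled porosity at the quadrature points
  const std::vector<std::vector<double>> phi = {   // phi[node][qp] for linear Lagrange
      {0.788675, 0.211325}, {0.211325, 0.788675}};

  std::vector<double> node_void_volume(2, 0.0);
  for (std::size_t n = 0; n < phi.size(); ++n)
    for (std::size_t qp = 0; qp < JxW.size(); ++qp)
      node_void_volume[n] += JxW[qp] * coord[qp] * porosity[qp] * phi[n][qp];

  // prints 0.3 0.3: the element's void volume (0.3 * 2) split evenly between its nodes
  std::cout << node_void_volume[0] << " " << node_void_volume[1] << '\n';
}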
219 " not in NodalVoidVolume's data structures. Perhaps the execute_on parameter of " 220 "NodalVoidVolume needs to be set differently");
Computes the void volume associated with each node.
Real getNodalVoidVolume(const Node *node) const
virtual void threadJoin(const UserObject &uo) override
Adds _nodal_void_volume from all threads that have looped over different elements in their execute() (a merge of this kind is sketched at the end of this listing).
const MooseArray< Real > & _coord
virtual void meshChanged()
static InputParameters validParams()
void rebuildStructures()
Reinitialize _nodal_void_volume and rebuild the MPI communication lists of this object.
virtual void initialize() override
Zeroes _nodal_void_volume.
const Parallel::Communicator & comm() const
std::unordered_map< const Node *, Real > _nodal_void_volume
_nodal_void_volume[node] = void volume of the node.
std::map< processor_id_type, std::vector< const Node * > > _nodes_to_send
_nodes_to_send[proc_id] = list of my nodes whose _nodal_void_volume contributions get sent to proc_id.
void buildCommLists()
Build MPI communication lists specifying which _nodal_void_volume info should be exchanged with other processors.
std::map< processor_id_type, std::vector< const Node * > > _nodes_to_receive
_nodes_to_receive[proc_id] = list of my nodes whose _nodal_void_volume contributions are received from proc_id.
const processor_id_type _my_pid
processor ID of this object
registerMooseObject("GeochemistryApp", NodalVoidVolume)
virtual void timestepSetup() override
If _rebuilding_needed then rebuildStructures()
uint8_t processor_id_type
processor_id_type n_processors() const
void exchangeGhostedInfo()
Exchange _nodal_void_volume for nodes that are joined to elements owned by other processors.
virtual void timestepSetup()
const VariablePhiValue & _phi
shape function
bool _rebuilding_needed
Whether reinitializing _nodal_void_volume and rebuilding the MPI communication lists is needed.
virtual const Node * nodePtr(const dof_id_type i) const
virtual void execute() override
Loops over all elements owned by this processor, computing the sum specified above.
virtual void initialSetup() override
typedef LIBMESH_DEFAULT_SCALAR_TYPE Real
const QBase *const & _qrule
const Elem *const & _current_elem
FEProblemBase & _fe_problem
virtual void finalize() override
If there is more than one processor, call exchangeGhostedInfo.
const MooseArray< Real > & _JxW
void mooseError(Args &&... args) const
const VariableValue & _porosity
porosity
NodalVoidVolume(const InputParameters ¶meters)
bool hasBlocks(const SubdomainName &name) const
virtual void initialSetup()
const libMesh::ConstElemRange & getNonlinearEvaluableElementRange()
virtual void meshChanged() override
Set _rebuilding_needed = true to signal that the internal data structures need rebuilding.
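As noted next to threadJoin above, here is a minimal, self-contained sketch of that kind of merge: each thread accumulates into its own map over a disjoint set of elements, and joining simply adds the other thread's partial sums node by node (std::string keys stand in for Node pointers):

#include <iostream>
#include <string>
#include <unordered_map>

// stand-in for _nodal_void_volume, keyed by a node name instead of a Node pointer
using NodalMap = std::unordered_map<std::string, double>;

// cf. threadJoin: fold another thread's partial sums into this thread's map
void thread_join(NodalMap & mine, const NodalMap & other)
{
  for (const auto & [node, vol] : other)
    mine[node] += vol; // the threads looped over disjoint element sets, so the sums just add
}

int main()
{
  NodalMap thread0 = {{"n1", 0.3}, {"n2", 0.6}};
  NodalMap thread1 = {{"n2", 0.2}, {"n3", 0.5}};

  thread_join(thread0, thread1);
  for (const auto & [node, vol] : thread0)
    std::cout << node << " -> " << vol << '\n';
}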