//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "NodalVoidVolume.h"
#include "Assembly.h"
#include "libmesh/parallel_sync.h"

registerMooseObject("GeochemistryApp", NodalVoidVolume);

InputParameters
NodalVoidVolume::validParams()
{
  InputParameters params = ElementUserObject::validParams();
  params.addCoupledVar("porosity", 1.0, "Porosity");
  params.addCoupledVar(
      "concentration",
      "A concentration variable. This is only used to determine the finite-element type of your "
      "concentration variable. The default is linear Lagrange. Therefore, if you are using "
      "linear Lagrange variables you do not need to supply this input");
  // All elements that share a node with the elements owned by this processor are known about by
  // this UserObject
  params.addRelationshipManager("ElementPointNeighborLayers",
                                Moose::RelationshipManagerType::GEOMETRIC |
                                    Moose::RelationshipManagerType::ALGEBRAIC |
                                    Moose::RelationshipManagerType::COUPLING,
                                [](const InputParameters &, InputParameters & rm_params)
                                { rm_params.set<unsigned short>("layers") = 1; });
  params.addClassDescription(
      "UserObject to compute the nodal void volume. Take care if you block-restrict this "
      "UserObject, since the volumes of the nodes on the block's boundary will not include any "
      "contributions from outside the block.");
  return params;
}

NodalVoidVolume::NodalVoidVolume(const InputParameters & parameters)
  : ElementUserObject(parameters),
    _porosity(coupledValue("porosity")),
    _rebuilding_needed(true),
    _my_pid(processor_id()),
    _phi(isParamValid("concentration") ? getVar("concentration", 0)->phi()
                                       : _assembly.fePhi<Real>(FEType(1, FEFamily::LAGRANGE)))
{
}

void
NodalVoidVolume::initialSetup()
{
  ElementUserObject::initialSetup();
  if (_rebuilding_needed)
    rebuildStructures(); // reinitialize _nodal_void_volume and rebuild MPI communication lists
}

void
NodalVoidVolume::meshChanged()
{
  ElementUserObject::meshChanged();
  _rebuilding_needed = true; // signal that internal data structures need rebuilding
}

void
NodalVoidVolume::timestepSetup()
{
  ElementUserObject::timestepSetup();
  if (_rebuilding_needed)
    rebuildStructures(); // reinitialize _nodal_void_volume and rebuild MPI communication lists
}

void
NodalVoidVolume::rebuildStructures()
{
  // Because of the RelationshipManager, this processor knows about all its elements as well as 1
  // layer of ghost elements. Hence, the loop over getNonlinearEvaluableElementRange below goes
  // over all the local elements as well as 1 layer of ghost elements, and all their nodes are
  // recorded. The ghosted elements are not visited in execute(), so the nodal volume is
  // incorrectly computed for all the nodes belonging to ghosted elements. So MPI communication of
  // nodal volume info is needed (implemented in exchangeGhostedInfo).
  _nodal_void_volume.clear();
  for (const auto & elem : _fe_problem.getNonlinearEvaluableElementRange())
    if (this->hasBlocks(elem->subdomain_id()))
      for (const auto & node : elem->node_ref_range())
        _nodal_void_volume[&node] = 0.0;
  if (_app.n_processors() > 1)
    buildCommLists();
  _rebuilding_needed = false;
}

void
NodalVoidVolume::buildCommLists()
{
  // global_node_nums_to_receive[pid] is a list of node numbers connected to elements owned by
  // processor pid. _nodes_to_receive[pid] is a list of node pointers. Both lists have the same
  // ordering
  std::map<processor_id_type, std::vector<dof_id_type>> global_node_nums_to_receive;
  _nodes_to_receive.clear();
  // seen_nodes are those that have been seen to be attached to an element of given processor ID
  std::map<processor_id_type, std::set<const Node *>> seen_nodes;
  // run through all elements known by this processor (the owned + 1 layer of ghosted elements),
  // recording nodes that are attached to elements that aren't owned by this processor
  for (const auto & elem : _fe_problem.getNonlinearEvaluableElementRange())
    if (this->hasBlocks(elem->subdomain_id()))
    {
      const processor_id_type elem_pid = elem->processor_id();
      if (elem_pid != _my_pid)
      {
        auto & pid_nodes = seen_nodes[elem_pid];
        for (const auto & node : elem->node_ref_range())
          if (pid_nodes.insert(&node).second) // true if the node has not been seen
          {
            global_node_nums_to_receive[elem_pid].push_back(node.id());
            _nodes_to_receive[elem_pid].push_back(&node);
          }
      }
    }

  // exchange this info with other processors, building global_node_nums_to_send at the same time
  std::map<processor_id_type, std::vector<dof_id_type>> global_node_nums_to_send;
  auto nodes_action_functor = [&](processor_id_type pid, const std::vector<dof_id_type> & nts)
  { global_node_nums_to_send[pid] = nts; };
  Parallel::push_parallel_vector_data(
      this->comm(), global_node_nums_to_receive, nodes_action_functor);

  // Build _nodes_to_send using global_node_nums_to_send, keeping the same ordering in the
  // std::vector
  _nodes_to_send.clear();
  for (const auto & kv : global_node_nums_to_send)
  {
    const processor_id_type pid = kv.first;
    auto & pid_entry = _nodes_to_send[pid];
    pid_entry.reserve(kv.second.size());
    for (const auto & node_num : kv.second)
      pid_entry.push_back(_mesh.nodePtr(node_num));
  }
}

void
NodalVoidVolume::exchangeGhostedInfo()
{
  // Exchange ghosted _nodal_void_volume information with other processors
  std::map<processor_id_type, std::vector<Real>> nvv_to_send;
  for (const auto & kv : _nodes_to_send)
  {
    const processor_id_type pid = kv.first;
    auto & pid_entry = nvv_to_send[pid];
    pid_entry.reserve(kv.second.size());
    for (const auto & nd : kv.second)
      pid_entry.push_back(_nodal_void_volume.at(nd));
  }

  auto nvv_action_functor = [this](processor_id_type pid, const std::vector<Real> & nvv_received)
  {
    const std::size_t msg_size = nvv_received.size();
    auto & receive_pid_entry = _nodes_to_receive[pid];
    mooseAssert(msg_size == receive_pid_entry.size(),
                "Message size, " << msg_size
                                 << ", incompatible with nodes_to_receive, which has size "
                                 << receive_pid_entry.size());
    for (std::size_t i = 0; i < msg_size; ++i)
      _nodal_void_volume[receive_pid_entry[i]] += nvv_received[i];
  };
  Parallel::push_parallel_vector_data(this->comm(), nvv_to_send, nvv_action_functor);
}

void
NodalVoidVolume::initialize()
{
  for (auto & nvv : _nodal_void_volume)
    nvv.second = 0.0;
}

void
NodalVoidVolume::finalize()
{
  if (_app.n_processors() > 1)
    exchangeGhostedInfo();
  // Now _nodal_void_volume is correct for all nodes within and on the boundary of this
  // processor's domain.
}

void
NodalVoidVolume::threadJoin(const UserObject & uo)
{
  // _nodal_void_volume will have been computed by other threads: add their contributions to ours.
  const auto & nvv = static_cast<const NodalVoidVolume &>(uo);
  for (auto & our_nvv : _nodal_void_volume)
    our_nvv.second += nvv._nodal_void_volume.at(our_nvv.first);
  // Now _nodal_void_volume is correct for all nodes within this processor's domain (but
  // potentially not those on the boundary: exchangeGhostedInfo() will fix that)
}

void
NodalVoidVolume::execute()
{
  // this gets called for all elements owned by this processor. So, after threadJoin(), it
  // correctly computes _nodal_void_volume for nodes completely within this processor's domain.
  for (unsigned i = 0; i < _current_elem->n_nodes(); ++i)
  {
    const Node * node = _current_elem->node_ptr(i);
    mooseAssert(_nodal_void_volume.count(node) == 1, "Node missing in _nodal_void_volume map");
    auto & nvv = _nodal_void_volume[node];
    for (unsigned qp = 0; qp < _qrule->n_points(); ++qp)
      nvv += _JxW[qp] * _coord[qp] * _phi[i][qp] * _porosity[qp];
  }
}

Real
NodalVoidVolume::getNodalVoidVolume(const Node * node) const
{
  auto find = _nodal_void_volume.find(node);
  if (find != _nodal_void_volume.end())
    return find->second;
  mooseError("nodal id ",
             node->id(),
             " not in NodalVoidVolume's data structures. Perhaps the execute_on parameter of "
             "NodalVoidVolume needs to be set differently");
}
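For context, execute() accumulates at each node i of the current element the quadrature sum of _JxW * _coord * _phi[i] * _porosity, so after finalize() the map holds the porosity-weighted (void) volume associated with each node, and getNodalVoidVolume() is the accessor other objects would use. Below is a minimal sketch, not part of this file, of how a nodal AuxKernel might consume that accessor. The class name ExampleVoidVolumeAux and the parameter name nodal_void_volume_uo are illustrative assumptions; getUserObject<T>() and _current_node are standard MOOSE AuxKernel facilities.

// Illustrative sketch only: header/source split, registerMooseObject() call and
// input-file wiring are omitted.
#include "AuxKernel.h"
#include "NodalVoidVolume.h"

class ExampleVoidVolumeAux : public AuxKernel
{
public:
  static InputParameters validParams()
  {
    InputParameters params = AuxKernel::validParams();
    params.addRequiredParam<UserObjectName>("nodal_void_volume_uo",
                                            "The NodalVoidVolume UserObject to query");
    return params;
  }

  ExampleVoidVolumeAux(const InputParameters & parameters)
    : AuxKernel(parameters), _nvv(getUserObject<NodalVoidVolume>("nodal_void_volume_uo"))
  {
  }

protected:
  // Valid for a nodal AuxVariable: _current_node points at the node being evaluated
  virtual Real computeValue() override { return _nvv.getNodalVoidVolume(_current_node); }

  /// The NodalVoidVolume UserObject holding the computed nodal void volumes
  const NodalVoidVolume & _nvv;
};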