https://mooseframework.inl.gov
Functions
Moose::Mortar::Contact Namespace Reference

Functions

template<typename T >
void communicateVelocities (std::unordered_map< const DofObject *, T > &dof_map, const MooseMesh &mesh, const bool nodal, const Parallel::Communicator &communicator, const bool send_data_back)
 This function is used to communicate velocities across processes. More...
 
void communicateR2T (std::unordered_map< const DofObject *, ADRankTwoTensor > &dof_map_adr2t, const MooseMesh &mesh, const bool nodal, const Parallel::Communicator &communicator, const bool send_data_back)
 This function is used to communicate rank-two tensors across processes. More...
 
template<typename T >
void communicateRealObject (std::unordered_map< const DofObject *, T > &dof_to_adreal, const MooseMesh &mesh, const bool nodal, const Parallel::Communicator &communicator, const bool send_data_back)
 
void communicateGaps (std::unordered_map< const DofObject *, std::pair< ADReal, Real >> &dof_to_weighted_gap, const MooseMesh &mesh, bool nodal, bool normalize_c, const Parallel::Communicator &communicator, bool send_data_back)
 This function is used to communicate gaps across processes. More...
 

Function Documentation

◆ communicateGaps()

void Moose::Mortar::Contact::communicateGaps ( std::unordered_map< const DofObject *, std::pair< ADReal, Real >> &  dof_to_weighted_gap,
const MooseMesh &  mesh,
bool  nodal,
bool  normalize_c,
const Parallel::Communicator &  communicator,
bool  send_data_back 
)

This function is used to communicate gaps across processes.

Parameters
dof_to_weighted_gap	Map from degree of freedom to weighted (weak) gap
mesh	Mesh used to locate nodes or elements
nodal	Whether the element has Lagrange interpolation
normalize_c	Whether to normalize with size the c coefficient in contact constraint
communicator	Process communicator
send_data_back	After aggregating data on the owning process, whether to send the aggregate back to senders. This can be necessary for things like penalty contact in which the constraint is not enforced by the owner but in a weighted way by the displacement constraints

Definition at line 21 of file MortarContactUtils.C.

Referenced by WeightedGapUserObject::finalize(), ComputeFrictionalForceCartesianLMMechanicalContact::incorrectEdgeDroppingPost(), ComputeWeightedGapCartesianLMMechanicalContact::incorrectEdgeDroppingPost(), ComputeDynamicWeightedGapLMMechanicalContact::incorrectEdgeDroppingPost(), ComputeFrictionalForceCartesianLMMechanicalContact::post(), ComputeWeightedGapCartesianLMMechanicalContact::post(), and ComputeDynamicWeightedGapLMMechanicalContact::post().

28 {
29  libmesh_parallel_only(communicator);
30  const auto our_proc_id = communicator.rank();
31 
32  // We may have weighted gap information that should go to other processes that own the dofs
33  using Datum = std::tuple<dof_id_type, ADReal, Real>;
34  std::unordered_map<processor_id_type, std::vector<Datum>> push_data;
35 
36  for (auto & pr : dof_to_weighted_gap)
37  {
38  const auto * const dof_object = pr.first;
39  const auto proc_id = dof_object->processor_id();
40  if (proc_id == our_proc_id)
41  continue;
42 
43  push_data[proc_id].push_back(
44  std::make_tuple(dof_object->id(), std::move(pr.second.first), pr.second.second));
45  }
46 
47  const auto & lm_mesh = mesh.getMesh();
48  std::unordered_map<processor_id_type, std::vector<const DofObject *>>
49  pid_to_dof_object_for_sending_back;
50 
51  auto action_functor =
52  [nodal,
53  our_proc_id,
54  &lm_mesh,
55  &dof_to_weighted_gap,
56  &normalize_c,
57  &pid_to_dof_object_for_sending_back,
58  send_data_back](const processor_id_type pid, const std::vector<Datum> & sent_data)
59  {
60  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
61  libmesh_ignore(our_proc_id);
62 
63  for (auto & [dof_id, weighted_gap, normalization] : sent_data)
64  {
65  const auto * const dof_object =
66  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
67  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
68  mooseAssert(dof_object, "This should be non-null");
69  if (send_data_back)
70  pid_to_dof_object_for_sending_back[pid].push_back(dof_object);
71  auto & [our_weighted_gap, our_normalization] = dof_to_weighted_gap[dof_object];
72  our_weighted_gap += weighted_gap;
73  if (normalize_c)
74  our_normalization += normalization;
75  }
76  };
77 
78  TIMPI::push_parallel_vector_data(communicator, push_data, action_functor);
79 
80  // Now send data back if requested
81  if (!send_data_back)
82  return;
83 
84  std::unordered_map<processor_id_type, std::vector<Datum>> push_back_data;
85 
86  for (const auto & [pid, dof_objects] : pid_to_dof_object_for_sending_back)
87  {
88  auto & pid_send_data = push_back_data[pid];
89  pid_send_data.reserve(dof_objects.size());
90  for (const DofObject * const dof_object : dof_objects)
91  {
92  const auto & [our_weighted_gap, our_normalization] =
93  libmesh_map_find(dof_to_weighted_gap, dof_object);
94  pid_send_data.push_back(
95  std::make_tuple(dof_object->id(), our_weighted_gap, our_normalization));
96  }
97  }
98 
99  auto sent_back_action_functor =
100  [nodal, our_proc_id, &lm_mesh, &dof_to_weighted_gap, &normalize_c](
101  const processor_id_type libmesh_dbg_var(pid), const std::vector<Datum> & sent_data)
102  {
103  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
104  libmesh_ignore(our_proc_id);
105 
106  for (auto & [dof_id, weighted_gap, normalization] : sent_data)
107  {
108  const auto * const dof_object =
109  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
110  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
111  mooseAssert(dof_object, "This should be non-null");
112  auto & [our_weighted_gap, our_normalization] = dof_to_weighted_gap[dof_object];
113  our_weighted_gap = weighted_gap;
114  if (normalize_c)
115  our_normalization = normalization;
116  }
117  };
118  TIMPI::push_parallel_vector_data(communicator, push_back_data, sent_back_action_functor);
119 }
MeshBase & mesh
void push_parallel_vector_data(const Communicator &comm, MapToVectors &&data, const ActionFunctor &act_on_data)
uint8_t processor_id_type
void libmesh_ignore(const Args &...)
DIE A HORRIBLE DEATH HERE typedef MPI_Comm communicator

◆ communicateR2T()

void Moose::Mortar::Contact::communicateR2T ( std::unordered_map< const DofObject *, ADRankTwoTensor > &  dof_map_adr2t,
const MooseMesh &  mesh,
const bool  nodal,
const Parallel::Communicator &  communicator,
const bool  send_data_back 
)
inline

This function is used to communicate rank-two tensors across processes.

Parameters
dof_map_adr2t	Map from degree of freedom to weighted rank-two tensor
mesh	Mesh used to locate nodes or elements
nodal	Whether the element has Lagrange interpolation
communicator	Process communicator
send_data_back	Whether to send back data to a distributed constraint

Definition at line 170 of file MortarContactUtils.h.

Referenced by BilinearMixedModeCohesiveZoneModel::finalize().

175 {
176  libmesh_parallel_only(communicator);
177  const auto our_proc_id = communicator.rank();
178 
179  // We may have weighted velocity information that should go to other processes that own the dofs
180  using Datum = std::pair<dof_id_type, ADRankTwoTensor>;
181  std::unordered_map<processor_id_type, std::vector<Datum>> push_data;
182 
183  for (auto & pr : dof_map_adr2t)
184  {
185  const auto * const dof_object = pr.first;
186  const auto proc_id = dof_object->processor_id();
187  if (proc_id == our_proc_id)
188  continue;
189 
190  push_data[proc_id].push_back(std::make_pair(dof_object->id(), std::move(pr.second)));
191  }
192 
193  const auto & lm_mesh = mesh.getMesh();
194  std::unordered_map<processor_id_type, std::vector<const DofObject *>>
195  pid_to_dof_object_for_sending_back;
196 
197  auto action_functor =
198  [nodal,
199  our_proc_id,
200  &lm_mesh,
201  &dof_map_adr2t,
202  &pid_to_dof_object_for_sending_back,
203  send_data_back](const processor_id_type pid, const std::vector<Datum> & sent_data)
204  {
205  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
206  libmesh_ignore(our_proc_id);
207 
208  for (auto & pr : sent_data)
209  {
210  const auto dof_id = pr.first;
211  const auto * const dof_object =
212  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
213  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
214  mooseAssert(dof_object, "This should be non-null");
215 
216  if (send_data_back)
217  pid_to_dof_object_for_sending_back[pid].push_back(dof_object);
218 
219  for (const auto i : make_range(3))
220  for (const auto j : make_range(3))
221  dof_map_adr2t[dof_object](i, j) += pr.second(i, j);
222  }
223  };
224 
225  TIMPI::push_parallel_vector_data(communicator, push_data, action_functor);
226 
227  // Now send data back if requested
228  if (!send_data_back)
229  return;
230 
231  std::unordered_map<processor_id_type, std::vector<Datum>> push_back_data;
232 
233  for (const auto & [pid, dof_objects] : pid_to_dof_object_for_sending_back)
234  {
235  auto & pid_send_data = push_back_data[pid];
236  pid_send_data.reserve(dof_objects.size());
237  for (const DofObject * const dof_object : dof_objects)
238  {
239  const auto & r2t = libmesh_map_find(dof_map_adr2t, dof_object);
240  pid_send_data.push_back({dof_object->id(), r2t});
241  }
242  }
243 
244  auto sent_back_action_functor =
245  [nodal, our_proc_id, &lm_mesh, &dof_map_adr2t](const processor_id_type libmesh_dbg_var(pid),
246  const std::vector<Datum> & sent_data)
247  {
248  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
249  libmesh_ignore(our_proc_id);
250 
251  for (auto & [dof_id, r2t_sent] : sent_data)
252  {
253  const auto * const dof_object =
254  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
255  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
256  mooseAssert(dof_object, "This should be non-null");
257  auto & r2t = dof_map_adr2t[dof_object];
258  r2t = r2t_sent;
259  }
260  };
261 
262  TIMPI::push_parallel_vector_data(communicator, push_back_data, sent_back_action_functor);
263 }
MeshBase & mesh
void push_parallel_vector_data(const Communicator &comm, MapToVectors &&data, const ActionFunctor &act_on_data)
uint8_t processor_id_type
void libmesh_ignore(const Args &...)
DIE A HORRIBLE DEATH HERE typedef MPI_Comm communicator
IntRange< T > make_range(T beg, T end)
static const std::complex< double > j(0, 1)
Complex number "j" (also known as "i")

◆ communicateRealObject()

template<typename T >
void Moose::Mortar::Contact::communicateRealObject ( std::unordered_map< const DofObject *, T > &  dof_to_adreal,
const MooseMesh &  mesh,
const bool  nodal,
const Parallel::Communicator &  communicator,
const bool  send_data_back 
)

Definition at line 267 of file MortarContactUtils.h.

Referenced by BilinearMixedModeCohesiveZoneModel::finalize().

272 {
273  libmesh_parallel_only(communicator);
274  const auto our_proc_id = communicator.rank();
275 
276  // We may have weighted gap information that should go to other processes that own the dofs
277  using Datum = std::tuple<dof_id_type, T>;
278  std::unordered_map<processor_id_type, std::vector<Datum>> push_data;
279 
280  for (auto & pr : dof_to_adreal)
281  {
282  const auto * const dof_object = pr.first;
283  const auto proc_id = dof_object->processor_id();
284  if (proc_id == our_proc_id)
285  continue;
286 
287  push_data[proc_id].push_back(std::make_tuple(dof_object->id(), std::move(pr.second)));
288  }
289 
290  const auto & lm_mesh = mesh.getMesh();
291  std::unordered_map<processor_id_type, std::vector<const DofObject *>>
292  pid_to_dof_object_for_sending_back;
293 
294  auto action_functor =
295  [nodal,
296  our_proc_id,
297  &lm_mesh,
298  &dof_to_adreal,
299  &pid_to_dof_object_for_sending_back,
300  send_data_back](const processor_id_type pid, const std::vector<Datum> & sent_data)
301  {
302  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
303  libmesh_ignore(our_proc_id);
304 
305  for (auto & [dof_id, weighted_gap] : sent_data)
306  {
307  const auto * const dof_object =
308  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
309  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
310  mooseAssert(dof_object, "This should be non-null");
311  if (send_data_back)
312  pid_to_dof_object_for_sending_back[pid].push_back(dof_object);
313  auto & our_adreal = dof_to_adreal[dof_object];
314  our_adreal += weighted_gap;
315  }
316  };
317 
318  TIMPI::push_parallel_vector_data(communicator, push_data, action_functor);
319 
320  // Now send data back if requested
321  if (!send_data_back)
322  return;
323 
324  std::unordered_map<processor_id_type, std::vector<Datum>> push_back_data;
325 
326  for (const auto & [pid, dof_objects] : pid_to_dof_object_for_sending_back)
327  {
328  auto & pid_send_data = push_back_data[pid];
329  pid_send_data.reserve(dof_objects.size());
330  for (const DofObject * const dof_object : dof_objects)
331  {
332  const auto & our_adreal = libmesh_map_find(dof_to_adreal, dof_object);
333  pid_send_data.push_back(std::make_tuple(dof_object->id(), our_adreal));
334  }
335  }
336 
337  auto sent_back_action_functor =
338  [nodal, our_proc_id, &lm_mesh, &dof_to_adreal](const processor_id_type libmesh_dbg_var(pid),
339  const std::vector<Datum> & sent_data)
340  {
341  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
342  libmesh_ignore(our_proc_id);
343 
344  for (auto & [dof_id, adreal] : sent_data)
345  {
346  const auto * const dof_object =
347  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
348  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
349  mooseAssert(dof_object, "This should be non-null");
350  auto & our_adreal = dof_to_adreal[dof_object];
351  our_adreal = adreal;
352  }
353  };
354  TIMPI::push_parallel_vector_data(communicator, push_back_data, sent_back_action_functor);
355 }
MeshBase & mesh
void push_parallel_vector_data(const Communicator &comm, MapToVectors &&data, const ActionFunctor &act_on_data)
uint8_t processor_id_type
void libmesh_ignore(const Args &...)
DIE A HORRIBLE DEATH HERE typedef MPI_Comm communicator

◆ communicateVelocities()

template<typename T >
void Moose::Mortar::Contact::communicateVelocities ( std::unordered_map< const DofObject *, T > &  dof_map,
const MooseMesh &  mesh,
const bool  nodal,
const Parallel::Communicator &  communicator,
const bool  send_data_back 
)
inline

This function is used to communicate velocities across processes.

Parameters
dof_map	Map from degree of freedom to weighted (weak) velocities
mesh	Mesh used to locate nodes or elements
nodal	Whether the element has Lagrange interpolation
communicator	Process communicator
send_data_back	Whether to send back data to a distributed constraint

Definition at line 70 of file MortarContactUtils.h.

Referenced by WeightedVelocitiesUserObject::finalize(), ComputeDynamicFrictionalForceLMMechanicalContact::incorrectEdgeDroppingPost(), ComputeFrictionalForceCartesianLMMechanicalContact::incorrectEdgeDroppingPost(), ComputeDynamicFrictionalForceLMMechanicalContact::post(), and ComputeFrictionalForceCartesianLMMechanicalContact::post().

75 {
76  libmesh_parallel_only(communicator);
77  const auto our_proc_id = communicator.rank();
78 
79  // We may have weighted velocity information that should go to other processes that own the dofs
80  using Datum = std::pair<dof_id_type, T>;
81  std::unordered_map<processor_id_type, std::vector<Datum>> push_data;
82 
83  for (auto & pr : dof_map)
84  {
85  const auto * const dof_object = pr.first;
86  const auto proc_id = dof_object->processor_id();
87  if (proc_id == our_proc_id)
88  continue;
89 
90  push_data[proc_id].push_back(std::make_pair(dof_object->id(), std::move(pr.second)));
91  }
92 
93  const auto & lm_mesh = mesh.getMesh();
94  std::unordered_map<processor_id_type, std::vector<const DofObject *>>
95  pid_to_dof_object_for_sending_back;
96 
97  auto action_functor =
98  [nodal, our_proc_id, &lm_mesh, &dof_map, &pid_to_dof_object_for_sending_back, send_data_back](
99  const processor_id_type pid, const std::vector<Datum> & sent_data)
100  {
101  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
102  libmesh_ignore(our_proc_id);
103 
104  for (auto & pr : sent_data)
105  {
106  const auto dof_id = pr.first;
107  const auto * const dof_object =
108  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
109  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
110  mooseAssert(dof_object, "This should be non-null");
111 
112  if (send_data_back)
113  pid_to_dof_object_for_sending_back[pid].push_back(dof_object);
114 
115  dof_map[dof_object][0] += pr.second[0];
116  dof_map[dof_object][1] += pr.second[1];
117  }
118  };
119 
120  TIMPI::push_parallel_vector_data(communicator, push_data, action_functor);
121 
122  // Now send data back if requested
123  if (!send_data_back)
124  return;
125 
126  std::unordered_map<processor_id_type, std::vector<Datum>> push_back_data;
127 
128  for (const auto & [pid, dof_objects] : pid_to_dof_object_for_sending_back)
129  {
130  auto & pid_send_data = push_back_data[pid];
131  pid_send_data.reserve(dof_objects.size());
132  for (const DofObject * const dof_object : dof_objects)
133  {
134  const auto & [tangent_one, tangent_two] = libmesh_map_find(dof_map, dof_object);
135  pid_send_data.push_back({dof_object->id(), {tangent_one, tangent_two}});
136  }
137  }
138 
139  auto sent_back_action_functor =
140  [nodal, our_proc_id, &lm_mesh, &dof_map](const processor_id_type libmesh_dbg_var(pid),
141  const std::vector<Datum> & sent_data)
142  {
143  mooseAssert(pid != our_proc_id, "We do not send messages to ourself here");
144  libmesh_ignore(our_proc_id);
145 
146  for (auto & [dof_id, tangents] : sent_data)
147  {
148  const auto * const dof_object =
149  nodal ? static_cast<const DofObject *>(lm_mesh.node_ptr(dof_id))
150  : static_cast<const DofObject *>(lm_mesh.elem_ptr(dof_id));
151  mooseAssert(dof_object, "This should be non-null");
152  auto & [our_tangent_one, our_tangent_two] = dof_map[dof_object];
153  our_tangent_one = tangents[0];
154  our_tangent_two = tangents[1];
155  }
156  };
157 
158  TIMPI::push_parallel_vector_data(communicator, push_back_data, sent_back_action_functor);
159 }
MeshBase & mesh
void push_parallel_vector_data(const Communicator &comm, MapToVectors &&data, const ActionFunctor &act_on_data)
uint8_t processor_id_type
void libmesh_ignore(const Args &...)
DIE A HORRIBLE DEATH HERE typedef MPI_Comm communicator