//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#pragma once
#include "MooseRandom.h"
#include "libmesh/communicator.h"
#include "libmesh/parallel.h"
#include "libmesh/parallel_sync.h"
#include "libmesh/libmesh_common.h"
#include <list>
#include <memory>
#include <iterator>
#include <algorithm>

namespace MooseUtils
{
///@{
/**
 * Swap function for a serial or distributed vector of data.
 * @param data The vector on which the values are to be swapped
 * @param idx0, idx1 The global indices to be swapped
 * @param comm_ptr Optional Communicator; if provided and the job is running with multiple
 *                 processors, the vector is assumed to be distributed
 */
template <typename T>
void swap(std::vector<T> & data,
          const std::size_t idx0,
          const std::size_t idx1,
          const libMesh::Parallel::Communicator & comm);
template <typename T>
void swap(std::vector<T> & data,
          const std::size_t idx0,
          const std::size_t idx1,
          const libMesh::Parallel::Communicator * comm_ptr = nullptr);
///@}
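
// A minimal usage sketch (illustrative only, not part of the declared interface): swapping two
// entries of a replicated vector, then two global entries of a distributed vector. The names
// 'local_data' and 'comm' below are hypothetical.
//
//   std::vector<int> serial = {10, 20, 30, 40};
//   MooseUtils::swap(serial, 1, 3);            // serial now holds {10, 40, 30, 20}
//   MooseUtils::swap(local_data, 0, 3, comm);  // distributed: exchanges global entries 0 and 3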

///@{
/**
 * Shuffle function for a serial or distributed vector of data that shuffles in place.
 * @param data The vector whose values are to be shuffled
 * @param generator Random number generator to use for the shuffle
 * @param seed_index (default: 0) The seed index to use for calls to randl
 * @param comm_ptr Optional Communicator; if provided and the job is running with multiple
 *                 processors, the vector is assumed to be distributed
 *
 * Both the serial and distributed versions implement a Fisher-Yates shuffle:
 * https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
 *
 * NOTE: The distributed shuffle implemented here performs a parallel communication for each swap
 *       pair generated. I am certain that there are more efficient ways to shuffle a distributed
 *       vector, but there doesn't seem to be an established algorithm in the literature (my
 *       search was not extensive).
 *
 *       I came to this conclusion because of a 2019 paper, which states the following
 *       (https://iopscience.iop.org/article/10.1088/1742-6596/1196/1/012035):
 *
 *           The study also says that the Fisher-Yates Shuffle can be developed in two ways,
 *           namely the algorithm's initial assumptions that allow for discrete uniform variables,
 *           and also with the advent of large core clusters and GPUs, there is an interest in
 *           making parallel versions of this algorithm.
 *
 *       This paper discusses the MergeShuffle (https://arxiv.org/abs/1508.03167), but that is a
 *       shared-memory parallel algorithm.
 *
 *       Hence, if you want to become famous, create a parallel Fisher-Yates algorithm for MPI.
 */
template <typename T>
void shuffle(std::vector<T> & data, MooseRandom & generator, const std::size_t seed_index = 0);
template <typename T>
void shuffle(std::vector<T> & data,
             MooseRandom & generator,
             const libMesh::Parallel::Communicator & comm);
template <typename T>
void shuffle(std::vector<T> & data,
             MooseRandom & generator,
             const std::size_t seed_index,
             const libMesh::Parallel::Communicator & comm);
template <typename T>
void shuffle(std::vector<T> & data,
             MooseRandom & generator,
             const std::size_t seed_index,
             const libMesh::Parallel::Communicator * comm_ptr);
///@}
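
// A minimal usage sketch (illustrative only): an in-place Fisher-Yates shuffle of a replicated
// vector and of a distributed vector, where each rank passes only its local portion together
// with the Communicator. The seeding call and values are assumptions for the example;
// 'local_data' and 'comm' are hypothetical.
//
//   MooseRandom generator;
//   generator.seed(0, 1980);
//   std::vector<int> data = {1, 2, 3, 4, 5};
//   MooseUtils::shuffle(data, generator);             // replicated, seed index 0
//   MooseUtils::shuffle(local_data, generator, comm); // distributed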

///@{
/**
 * Randomly resample a vector of data, allowing a value to be repeated.
 * @param data The vector to be resampled
 * @param generator Random number generator to use for the resampling
 * @param seed_index (default: 0) The seed index to use for calls to randl
 * @param comm_ptr Optional Communicator; if provided and the job is running with multiple
 *                 processors, the vector is assumed to be distributed
 */
template <typename T>
std::vector<T>
resample(const std::vector<T> & data, MooseRandom & generator, const std::size_t seed_index = 0);
template <typename T>
std::vector<T> resample(const std::vector<T> & data,
                        MooseRandom & generator,
                        const libMesh::Parallel::Communicator & comm);
template <typename T>
std::vector<T> resample(const std::vector<T> & data,
                        MooseRandom & generator,
                        const std::size_t seed_index,
                        const libMesh::Parallel::Communicator & comm);
template <typename T>
std::vector<T> resample(const std::vector<T> & data,
                        MooseRandom & generator,
                        const std::size_t seed_index,
                        const libMesh::Parallel::Communicator * comm_ptr);
///@}
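
// A minimal usage sketch (illustrative only): resampling with replacement returns a new vector
// of the same local size whose entries are drawn uniformly from the global input, e.g. for a
// bootstrap sample. The seeding call and values are assumptions for the example.
//
//   MooseRandom generator;
//   generator.seed(0, 1980);
//   std::vector<double> data = {0.1, 0.2, 0.3, 0.4};
//   std::vector<double> boot = MooseUtils::resample(data, generator);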

///@{
/**
 * Randomly resample a vector of data and apply a functor, allowing a value to be repeated.
 * @param data The vector to be resampled
 * @param functor Functor to apply to each entry of the resampled vector
 * @param generator Random number generator to use for the resampling
 * @param seed_index (default: 0) The seed index to use for calls to randl
 * @param comm_ptr Optional Communicator; if provided and the job is running with multiple
 *                 processors, the vector is assumed to be distributed
 */
template <typename T, typename ActionFunctor>
void resampleWithFunctor(const std::vector<T> & data,
                         const ActionFunctor & functor,
                         MooseRandom & generator,
                         const std::size_t seed_index = 0);
template <typename T, typename ActionFunctor>
void resampleWithFunctor(const std::vector<T> & data,
                         const ActionFunctor & functor,
                         MooseRandom & generator,
                         const libMesh::Parallel::Communicator & comm);
template <typename T, typename ActionFunctor>
void resampleWithFunctor(const std::vector<T> & data,
                         const ActionFunctor & functor,
                         MooseRandom & generator,
                         const std::size_t seed_index,
                         const libMesh::Parallel::Communicator & comm);
template <typename T, typename ActionFunctor>
void resampleWithFunctor(const std::vector<T> & data,
                         const ActionFunctor & functor,
                         MooseRandom & generator,
                         const std::size_t seed_index,
                         const libMesh::Parallel::Communicator * comm_ptr);
///@}
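
// A minimal usage sketch (illustrative only): rather than building the resampled vector, a
// callable is applied to each resampled entry; in the distributed case the functor is invoked
// on the rank that owns the selected entry. The lambda below is a hypothetical accumulator.
//
//   MooseRandom generator;
//   generator.seed(0, 1980);
//   std::vector<double> data = {0.1, 0.2, 0.3, 0.4};
//   double sum = 0.0;
//   MooseUtils::resampleWithFunctor(
//       data, [&sum](const double & value) { sum += value; }, generator);
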
}

template <typename T>
void
MooseUtils::swap(std::vector<T> & data,
                 const std::size_t idx0,
                 const std::size_t idx1,
                 const libMesh::Parallel::Communicator * comm_ptr)
{
  if (!comm_ptr || comm_ptr->size() == 1)
  {
    mooseAssert(idx0 < data.size(),
                "idx0 (" << idx0 << ") out of range, data.size() is " << data.size());
    mooseAssert(idx1 < data.size(),
                "idx1 (" << idx1 << ") out of range, data.size() is " << data.size());
    std::swap(data[idx0], data[idx1]);
  }

  else
  {
    // Size of the local input data
    const auto n_local = data.size();
    const auto rank = comm_ptr->rank();

    // Compute the global size of the vector
    std::size_t n_global = n_local;
    comm_ptr->sum(n_global);
    mooseAssert(idx0 < n_global,
                "idx0 (" << idx0 << ") out of range, the global data size is " << n_global);
    mooseAssert(idx1 < n_global,
                "idx1 (" << idx1 << ") out of range, the global data size is " << n_global);

    // Compute the vector data offsets; the scope cleans up the "local_sizes" vector
    std::vector<std::size_t> offsets(comm_ptr->size());
    {
      std::vector<std::size_t> local_sizes;
      comm_ptr->allgather(n_local, local_sizes);
      for (std::size_t i = 0; i < local_sizes.size() - 1; ++i)
        offsets[i + 1] = offsets[i] + local_sizes[i];
    }
     192             :     // Locate the rank and local index of the data to swap
     193          24 :     auto idx0_offset_iter = std::prev(std::upper_bound(offsets.begin(), offsets.end(), idx0));
     194          24 :     auto idx0_rank = std::distance(offsets.begin(), idx0_offset_iter);
     195          24 :     auto idx0_local_idx = idx0 - *idx0_offset_iter;
     196             : 
     197          24 :     auto idx1_offset_iter = std::prev(std::upper_bound(offsets.begin(), offsets.end(), idx1));
     198          24 :     auto idx1_rank = std::distance(offsets.begin(), idx1_offset_iter);
     199          24 :     auto idx1_local_idx = idx1 - *idx1_offset_iter;
     200             : 
     201             :     // The values, if any, needed from other rank
     202          24 :     std::unordered_map<processor_id_type, std::vector<std::size_t>> needs;
     203          24 :     if (idx0_rank != rank && idx1_rank == rank)
     204           6 :       needs[idx0_rank].push_back(idx0_local_idx);
     205          24 :     if (idx0_rank == rank && idx1_rank != rank)
     206           6 :       needs[idx1_rank].push_back(idx1_local_idx);
     207             : 
     208             :     // Collect the values needed by this processor
     209          24 :     std::unordered_map<processor_id_type, std::vector<T>> returns;
     210          24 :     auto return_functor =
     211          12 :         [&data, &returns](processor_id_type pid, const std::vector<std::size_t> & indices)
     212             :     {
     213          12 :       auto & returns_pid = returns[pid];
     214          24 :       for (auto idx : indices)
     215          12 :         returns_pid.push_back(data[idx]);
     216             :     };
     217          24 :     Parallel::push_parallel_vector_data(*comm_ptr, needs, return_functor);
     218             : 
     219             :     // Receive needed values from the others processors
     220          24 :     std::vector<T> incoming;
     221          48 :     auto recv_functor = [&incoming](processor_id_type /*pid*/, const std::vector<T> & values)
     222          12 :     { incoming = values; };
     223          24 :     Parallel::push_parallel_vector_data(*comm_ptr, returns, recv_functor);
     224             : 
     225          24 :     if (idx0_rank == rank && idx1_rank == rank)
     226           6 :       MooseUtils::swap(data, idx0_local_idx, idx1_local_idx);
     227             : 
     228          18 :     else if (idx0_rank == rank)
     229             :     {
     230             :       mooseAssert(incoming.size() == 1, "Only one value should be received");
     231           6 :       data[idx0_local_idx] = incoming[0];
     232             :     }
     233          12 :     else if (idx1_rank == rank)
     234             :     {
     235             :       mooseAssert(incoming.size() == 1, "Only one value should be received");
     236           6 :       data[idx1_local_idx] = incoming[0];
     237             :     }
     238          24 :   }
     239         117 : }

template <typename T>
void
MooseUtils::shuffle(std::vector<T> & data,
                    MooseRandom & generator,
                    const std::size_t seed_index,
                    const libMesh::Parallel::Communicator * comm_ptr)
{
  // REPLICATED data
  if (!comm_ptr || comm_ptr->size() == 1)
  {
    std::size_t n_global = data.size();
    for (std::size_t i = n_global - 1; i > 0; --i)
    {
      auto j = generator.randl(seed_index, 0, i);
      MooseUtils::swap(data, i, j, nullptr);
    }
  }

  // DISTRIBUTED data
  else
  {
    // Local/global size
    std::size_t n_local = data.size();
    std::size_t n_global = n_local;
    comm_ptr->sum(n_global);

    // Compute the vector data offsets; the scope cleans up the "local_sizes" vector
    std::vector<std::size_t> offsets(comm_ptr->size());
    {
      std::vector<std::size_t> local_sizes;
      comm_ptr->allgather(n_local, local_sizes);
      for (std::size_t i = 0; i < local_sizes.size() - 1; ++i)
        offsets[i + 1] = offsets[i] + local_sizes[i];
    }

    // Perform swaps
    auto rank = comm_ptr->rank();
    for (std::size_t idx0 = n_global - 1; idx0 > 0; --idx0)
    {
      auto idx1 = generator.randl(seed_index, 0, idx0);

      // Locate the rank and local index of the data to swap
      auto idx0_offset_iter = std::prev(std::upper_bound(offsets.begin(), offsets.end(), idx0));
      auto idx0_rank = std::distance(offsets.begin(), idx0_offset_iter);
      auto idx0_local_idx = idx0 - *idx0_offset_iter;

      auto idx1_offset_iter = std::prev(std::upper_bound(offsets.begin(), offsets.end(), idx1));
      auto idx1_rank = std::distance(offsets.begin(), idx1_offset_iter);
      auto idx1_local_idx = idx1 - *idx1_offset_iter;

      // The values, if any, needed from the other rank
      std::unordered_map<processor_id_type, std::vector<std::size_t>> needs;
      if (idx0_rank != rank && idx1_rank == rank)
        needs[idx0_rank].push_back(idx0_local_idx);
      if (idx0_rank == rank && idx1_rank != rank)
        needs[idx1_rank].push_back(idx1_local_idx);

      // Collect the values needed by this processor
      std::unordered_map<processor_id_type, std::vector<T>> returns;
      auto return_functor =
          [&data, &returns](processor_id_type pid, const std::vector<std::size_t> & indices)
      {
        auto & returns_pid = returns[pid];
        for (auto idx : indices)
          returns_pid.push_back(data[idx]);
      };
      Parallel::push_parallel_vector_data(*comm_ptr, needs, return_functor);

      // Receive needed values from the other processors
      std::vector<T> incoming;
      auto recv_functor = [&incoming](processor_id_type /*pid*/, const std::vector<T> & values)
      { incoming = values; };
      Parallel::push_parallel_vector_data(*comm_ptr, returns, recv_functor);

      if (idx0_rank == rank && idx1_rank == rank)
        MooseUtils::swap(data, idx0_local_idx, idx1_local_idx);

      else if (idx0_rank == rank)
      {
        mooseAssert(incoming.size() == 1, "Only one value should be received");
        data[idx0_local_idx] = incoming[0];
      }
      else if (idx1_rank == rank)
      {
        mooseAssert(incoming.size() == 1, "Only one value should be received");
        data[idx1_local_idx] = incoming[0];
      }
    }
  }
}

template <typename T>
std::vector<T>
MooseUtils::resample(const std::vector<T> & data,
                     MooseRandom & generator,
                     const std::size_t seed_index,
                     const libMesh::Parallel::Communicator * comm_ptr)
{
  // Size of the local input data
  const std::size_t n_local = data.size();

  // Re-sampled data vector to be returned
  std::vector<T> replicate(n_local);

  // REPLICATED data
  if (!comm_ptr || comm_ptr->size() == 1)
  {
    replicate.resize(n_local);
    for (std::size_t j = 0; j < n_local; ++j)
    {
      auto index = generator.randl(seed_index, 0, n_local);
      replicate[j] = data[index];
    }
  }

  // DISTRIBUTED data
  else
  {
    // Compute the global size of the vector
    std::size_t n_global = n_local;
    comm_ptr->sum(n_global);

    // Compute the vector data offsets; the scope cleans up the "local_sizes" vector
    std::vector<std::size_t> offsets(comm_ptr->size());
    {
      std::vector<std::size_t> local_sizes;
      comm_ptr->allgather(n_local, local_sizes);
      for (std::size_t i = 0; i < local_sizes.size() - 1; ++i)
        offsets[i + 1] = offsets[i] + local_sizes[i];
    }

    // Advance the random number generator to this rank's offset so that every processor
    // consumes the same global random sequence
    const auto rank = comm_ptr->rank();
    for (std::size_t i = 0; i < offsets[rank]; ++i)
      generator.randl(seed_index, 0, n_global);

    // Compute the needs for this processor
    std::unordered_map<processor_id_type, std::vector<std::pair<std::size_t, std::size_t>>> needs;
    for (std::size_t i = 0; i < n_local; ++i)
    {
      const auto idx = generator.randl(seed_index, 0, n_global); // random global index

      // Locate the rank and local index of the data desired
      const auto idx_offset_iter = std::prev(std::upper_bound(offsets.begin(), offsets.end(), idx));
      const auto idx_rank = std::distance(offsets.begin(), idx_offset_iter);
      const auto idx_local_idx = idx - *idx_offset_iter;

      // Locally available data can be inserted into the re-sample directly; non-local data is
      // added to the needs from other ranks
      if (idx_rank == rank)
        replicate[i] = data[idx_local_idx];
      else
        needs[idx_rank].emplace_back(idx_local_idx, i);
    }

    // Advance the random number generator to the end of the global vector
    for (std::size_t i = offsets[rank] + n_local; i < n_global; ++i)
      generator.randl(seed_index, 0, n_global);

    // Collect the values to be returned to the various processors
    std::unordered_map<processor_id_type, std::vector<std::pair<T, std::size_t>>> returns;
    auto return_functor =
        [&data, &returns](processor_id_type pid,
                          const std::vector<std::pair<std::size_t, std::size_t>> & indices)
    {
      auto & returns_pid = returns[pid];
      for (const auto & idx : indices)
        returns_pid.emplace_back(data[idx.first], idx.second);
    };
    Parallel::push_parallel_vector_data(*comm_ptr, needs, return_functor);

    // Receive resampled values from the various processors
    auto recv_functor =
        [&replicate](processor_id_type, const std::vector<std::pair<T, std::size_t>> & values)
    {
      for (const auto & value : values)
        replicate[value.second] = value.first;
    };
    Parallel::push_parallel_vector_data(*comm_ptr, returns, recv_functor);
  }
  return replicate;
}

template <typename T, typename ActionFunctor>
void
MooseUtils::resampleWithFunctor(const std::vector<T> & data,
                                const ActionFunctor & functor,
                                MooseRandom & generator,
                                const std::size_t seed_index,
                                const libMesh::Parallel::Communicator * comm_ptr)
{
  const std::size_t n_local = data.size();

  if (!comm_ptr || comm_ptr->size() == 1)
  {
    for (std::size_t j = 0; j < n_local; ++j)
    {
      auto index = generator.randl(seed_index, 0, n_local);
      functor(data[index]);
    }
  }
  else
  {
    // Compute the global size of the vector
    std::size_t n_global = n_local;
    comm_ptr->sum(n_global);

    // Compute the vector data offsets; the scope cleans up the "local_sizes" vector
    std::vector<std::size_t> offsets(comm_ptr->size());
    {
      std::vector<std::size_t> local_sizes;
      comm_ptr->allgather(n_local, local_sizes);
      for (std::size_t i = 0; i < local_sizes.size() - 1; ++i)
        offsets[i + 1] = offsets[i] + local_sizes[i];
    }

    // Advance the random number generator to this rank's offset so that every processor
    // consumes the same global random sequence
    const auto rank = comm_ptr->rank();
    for (std::size_t i = 0; i < offsets[rank]; ++i)
      generator.randl(seed_index, 0, n_global);

    // Compute the needs for this processor
    std::unordered_map<processor_id_type, std::vector<std::size_t>> indices;
    for (std::size_t i = 0; i < n_local; ++i)
    {
      const auto idx = generator.randl(seed_index, 0, n_global); // random global index

      // Locate the rank and local index of the data desired
      const auto idx_offset_iter = std::prev(std::upper_bound(offsets.begin(), offsets.end(), idx));
      const auto idx_rank = std::distance(offsets.begin(), idx_offset_iter);
      const auto idx_local_idx = idx - *idx_offset_iter;

      // Push back the index to the appropriate rank
      indices[idx_rank].push_back(idx_local_idx);
    }

    // Advance the random number generator to the end of the global vector
    for (std::size_t i = offsets[rank] + n_local; i < n_global; ++i)
      generator.randl(seed_index, 0, n_global);

    // Send the indices to the appropriate rank and have the functor do its work
    auto act_functor =
        [&functor, &data](processor_id_type /*pid*/, const std::vector<std::size_t> & indices)
    {
      for (const auto & idx : indices)
        functor(data[idx]);
    };
    Parallel::push_parallel_vector_data(*comm_ptr, indices, act_functor);
  }
}

template <typename T>
void
MooseUtils::swap(std::vector<T> & data,
                 const std::size_t idx0,
                 const std::size_t idx1,
                 const libMesh::Parallel::Communicator & comm)
{
  MooseUtils::swap<T>(data, idx0, idx1, &comm);
}

template <typename T>
void
MooseUtils::shuffle(std::vector<T> & data, MooseRandom & generator, const std::size_t seed_index)
{
  return MooseUtils::shuffle(data, generator, seed_index, nullptr);
}

template <typename T>
void
MooseUtils::shuffle(std::vector<T> & data,
                    MooseRandom & generator,
                    const libMesh::Parallel::Communicator & comm)
{
  return MooseUtils::shuffle(data, generator, 0, &comm);
}

template <typename T>
void
MooseUtils::shuffle(std::vector<T> & data,
                    MooseRandom & generator,
                    const std::size_t seed_index,
                    const libMesh::Parallel::Communicator & comm)
{
  return MooseUtils::shuffle(data, generator, seed_index, &comm);
}

template <typename T>
std::vector<T>
MooseUtils::resample(const std::vector<T> & data,
                     MooseRandom & generator,
                     const std::size_t seed_index)
{
  return MooseUtils::resample(data, generator, seed_index, nullptr);
}

template <typename T>
std::vector<T>
MooseUtils::resample(const std::vector<T> & data,
                     MooseRandom & generator,
                     const libMesh::Parallel::Communicator & comm)
{
  return MooseUtils::resample(data, generator, 0, &comm);
}

template <typename T>
std::vector<T>
MooseUtils::resample(const std::vector<T> & data,
                     MooseRandom & generator,
                     const std::size_t seed_index,
                     const libMesh::Parallel::Communicator & comm)
{
  return MooseUtils::resample(data, generator, seed_index, &comm);
}

template <typename T, typename ActionFunctor>
void
MooseUtils::resampleWithFunctor(const std::vector<T> & data,
                                const ActionFunctor & functor,
                                MooseRandom & generator,
                                const std::size_t seed_index)
{
  return MooseUtils::resampleWithFunctor(data, functor, generator, seed_index, nullptr);
}

template <typename T, typename ActionFunctor>
void
MooseUtils::resampleWithFunctor(const std::vector<T> & data,
                                const ActionFunctor & functor,
                                MooseRandom & generator,
                                const libMesh::Parallel::Communicator & comm)
{
  return MooseUtils::resampleWithFunctor(data, functor, generator, 0, &comm);
}

template <typename T, typename ActionFunctor>
void
MooseUtils::resampleWithFunctor(const std::vector<T> & data,
                                const ActionFunctor & functor,
                                MooseRandom & generator,
                                const std::size_t seed_index,
                                const libMesh::Parallel::Communicator & comm)
{
  return MooseUtils::resampleWithFunctor(data, functor, generator, seed_index, &comm);
}
