Line data Source code
1 : // The libMesh Finite Element Library.
2 : // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 :
4 : // This library is free software; you can redistribute it and/or
5 : // modify it under the terms of the GNU Lesser General Public
6 : // License as published by the Free Software Foundation; either
7 : // version 2.1 of the License, or (at your option) any later version.
8 :
9 : // This library is distributed in the hope that it will be useful,
10 : // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 : // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 : // Lesser General Public License for more details.
13 :
14 : // You should have received a copy of the GNU Lesser General Public
15 : // License along with this library; if not, write to the Free Software
16 : // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 :
18 :
19 :
20 : // Local includes
21 : #include "libmesh/distributed_vector.h"
22 :
23 : // libMesh includes
24 : #include "libmesh/dense_vector.h"
25 : #include "libmesh/dense_subvector.h"
26 : #include "libmesh/int_range.h"
27 : #include "libmesh/libmesh_common.h"
28 : #include "libmesh/tensor_tools.h"
29 :
30 : // TIMPI includes
31 : #include "timpi/parallel_implementation.h"
32 : #include "timpi/parallel_sync.h"
33 :
34 : // C++ includes
35 : #include <cstdlib> // *must* precede <cmath> for proper std:abs() on PGI, Sun Studio CC
36 : #include <cmath> // for std::abs
37 : #include <limits> // std::numeric_limits<T>::min()
38 :
39 :
40 : namespace libMesh
41 : {
42 :
43 :
44 :
45 : //--------------------------------------------------------------------------
46 : // DistributedVector methods
47 : template <typename T>
48 144 : T DistributedVector<T>::sum () const
49 : {
50 : // This function must be run on all processors at once
51 4 : parallel_object_only();
52 :
53 4 : libmesh_assert (this->initialized());
54 4 : libmesh_assert_equal_to (_values.size(), _local_size);
55 4 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
56 :
57 144 : T local_sum = 0.;
58 :
59 2206 : for (auto & val : _values)
60 2022 : local_sum += val;
61 :
62 144 : this->comm().sum(local_sum);
63 :
64 144 : return local_sum;
65 : }
66 :
67 :
68 :
69 : template <typename T>
70 710 : Real DistributedVector<T>::l1_norm () const
71 : {
72 : // This function must be run on all processors at once
73 20 : parallel_object_only();
74 :
75 20 : libmesh_assert (this->initialized());
76 20 : libmesh_assert_equal_to (_values.size(), _local_size);
77 20 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
78 :
79 710 : Real local_l1 = 0.;
80 :
81 10920 : for (auto & val : _values)
82 10420 : local_l1 += std::abs(val);
83 :
84 710 : this->comm().sum(local_l1);
85 :
86 710 : return local_l1;
87 : }
88 :
89 :
90 :
91 : template <typename T>
92 284 : Real DistributedVector<T>::l2_norm () const
93 : {
94 : // This function must be run on all processors at once
95 8 : parallel_object_only();
96 :
97 8 : libmesh_assert (this->initialized());
98 8 : libmesh_assert_equal_to (_values.size(), _local_size);
99 8 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
100 :
101 284 : Real local_l2 = 0.;
102 :
103 4368 : for (auto & val : _values)
104 4168 : local_l2 += TensorTools::norm_sq(val);
105 :
106 284 : this->comm().sum(local_l2);
107 :
108 292 : return std::sqrt(local_l2);
109 : }
110 :
111 :
112 :
113 : template <typename T>
114 142 : Real DistributedVector<T>::linfty_norm () const
115 : {
116 : // This function must be run on all processors at once
117 4 : parallel_object_only();
118 :
119 4 : libmesh_assert (this->initialized());
120 4 : libmesh_assert_equal_to (_values.size(), _local_size);
121 4 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
122 :
123 142 : Real local_linfty = 0.;
124 :
125 2184 : for (auto & val : _values)
126 2042 : local_linfty = std::max(local_linfty,
127 4104 : static_cast<Real>(std::abs(val))
128 42 : ); // Note we static_cast so that both
129 : // types are the same, as required
130 : // by std::max
131 :
132 142 : this->comm().max(local_linfty);
133 :
134 142 : return local_linfty;
135 : }
136 :
137 :
138 :
139 : template <typename T>
140 142 : NumericVector<T> & DistributedVector<T>::operator += (const NumericVector<T> & v)
141 : {
142 4 : libmesh_assert (this->closed());
143 4 : libmesh_assert (this->initialized());
144 4 : libmesh_assert_equal_to (_values.size(), _local_size);
145 4 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
146 :
147 142 : add(1., v);
148 :
149 142 : return *this;
150 : }
151 :
152 :
153 :
154 : template <typename T>
155 284 : NumericVector<T> & DistributedVector<T>::operator -= (const NumericVector<T> & v)
156 : {
157 8 : libmesh_assert (this->closed());
158 8 : libmesh_assert (this->initialized());
159 8 : libmesh_assert_equal_to (_values.size(), _local_size);
160 8 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
161 :
162 284 : add(-1., v);
163 :
164 284 : return *this;
165 : }
166 :
167 :
168 :
169 : template <typename T>
170 142 : NumericVector<T> & DistributedVector<T>::operator *= (const NumericVector<T> & v)
171 : {
172 4 : libmesh_assert_equal_to(size(), v.size());
173 :
174 4 : const DistributedVector<T> & v_vec = cast_ref<const DistributedVector<T> &>(v);
175 :
176 2184 : for (auto i : index_range(_values))
177 2106 : _values[i] *= v_vec._values[i];
178 :
179 142 : return *this;
180 : }
181 :
182 :
183 :
184 : template <typename T>
185 142 : NumericVector<T> & DistributedVector<T>::operator /= (const NumericVector<T> & v)
186 : {
187 4 : libmesh_assert_equal_to(size(), v.size());
188 :
189 4 : const DistributedVector<T> & v_vec = cast_ref<const DistributedVector<T> &>(v);
190 :
191 2184 : for (auto i : index_range(_values))
192 2106 : _values[i] /= v_vec._values[i];
193 :
194 142 : return *this;
195 : }
196 :
197 :
198 :
199 :
200 : template <typename T>
201 142 : void DistributedVector<T>::reciprocal()
202 : {
203 2184 : for (auto & val : _values)
204 : {
205 : // Don't divide by zero
206 42 : libmesh_assert_not_equal_to (val, T(0));
207 :
208 2042 : val = 1. / val;
209 : }
210 142 : }
211 :
212 :
213 :
214 :
215 : template <typename T>
216 0 : void DistributedVector<T>::conjugate()
217 : {
218 : // Replace values by complex conjugate
219 0 : for (auto & val : _values)
220 0 : val = libmesh_conj(val);
221 0 : }
222 :
223 :
224 :
225 :
226 :
227 : template <typename T>
228 142 : void DistributedVector<T>::add (const T v)
229 : {
230 4 : libmesh_assert (this->initialized());
231 4 : libmesh_assert_equal_to (_values.size(), _local_size);
232 4 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
233 :
234 2184 : for (auto & val : _values)
235 2022 : val += v;
236 142 : }
237 :
238 :
239 :
240 : template <typename T>
241 0 : void DistributedVector<T>::add (const NumericVector<T> & v)
242 : {
243 0 : libmesh_assert (this->initialized());
244 0 : libmesh_assert_equal_to (_values.size(), _local_size);
245 0 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
246 :
247 0 : add (1., v);
248 0 : }
249 :
250 :
251 :
252 : template <typename T>
253 568 : void DistributedVector<T>::add (const T a, const NumericVector<T> & v_in)
254 : {
255 16 : libmesh_assert (this->initialized());
256 16 : libmesh_assert_equal_to (_values.size(), _local_size);
257 16 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
258 :
259 : // Make sure the NumericVector passed in is really a DistributedVector
260 16 : const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&v_in);
261 16 : libmesh_error_msg_if(!v, "Cannot add different types of NumericVectors.");
262 :
263 8736 : for (auto i : index_range(_values))
264 8424 : _values[i] += a * v->_values[i];
265 568 : }
266 :
267 :
268 :
269 : template <typename T>
270 142 : void DistributedVector<T>::scale (const T factor)
271 : {
272 4 : libmesh_assert (this->initialized());
273 4 : libmesh_assert_equal_to (_values.size(), _local_size);
274 4 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
275 :
276 2184 : for (auto & val : _values)
277 2022 : val *= factor;
278 142 : }
279 :
280 : template <typename T>
281 0 : void DistributedVector<T>::abs()
282 : {
283 0 : libmesh_assert (this->initialized());
284 0 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
285 :
286 0 : for (auto & val : _values)
287 0 : val = std::abs(val);
288 0 : }
289 :
290 :
291 :
292 :
293 :
294 : template <typename T>
295 144 : T DistributedVector<T>::dot (const NumericVector<T> & V) const
296 : {
297 : // This function must be run on all processors at once
298 4 : parallel_object_only();
299 :
300 : // Make sure the NumericVector passed in is really a DistributedVector
301 4 : const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&V);
302 :
303 : // Make sure that the two vectors are distributed in the same way.
304 4 : libmesh_assert_equal_to ( this->first_local_index(), v->first_local_index() );
305 4 : libmesh_assert_equal_to ( this->last_local_index(), v->last_local_index() );
306 :
307 : // The result of dotting together the local parts of the vector.
308 144 : T local_dot = 0;
309 :
310 2206 : for (auto i : index_range(_values))
311 2064 : local_dot += this->_values[i] * v->_values[i];
312 :
313 : // The local dot products are now summed via MPI
314 144 : this->comm().sum(local_dot);
315 :
316 144 : return local_dot;
317 : }
318 :
319 :
320 :
321 : template <typename T>
322 : NumericVector<T> &
323 142 : DistributedVector<T>::operator = (const T s)
324 : {
325 4 : libmesh_assert (this->initialized());
326 4 : libmesh_assert_equal_to (_values.size(), _local_size);
327 4 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
328 :
329 2184 : for (auto & val : _values)
330 2042 : val = s;
331 :
332 142 : return *this;
333 : }
334 :
335 :
336 :
337 : template <typename T>
338 : NumericVector<T> &
339 355 : DistributedVector<T>::operator = (const NumericVector<T> & v_in)
340 : {
341 : // Make sure the NumericVector passed in is really a DistributedVector
342 10 : const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&v_in);
343 :
344 355 : *this = *v;
345 :
346 355 : return *this;
347 : }
348 :
349 :
350 :
template <typename T>
DistributedVector<T> &
DistributedVector<T>::operator = (const DistributedVector<T> & v)
{
  // Copy the assembly flags and parallel-layout metadata from the source.
  this->_is_initialized = v._is_initialized;
  this->_is_closed = v._is_closed;

  _global_size = v._global_size;
  _local_size = v._local_size;
  _first_local_index = v._first_local_index;
  _last_local_index = v._last_local_index;

  // NOTE(review): _local_size was just copied from v above, so if
  // local_size() simply reports _local_size this comparison can never
  // fail and the error branch looks unreachable.  Confirm whether the
  // check was intended to run *before* the metadata copy.
  if (v.local_size() == this->local_size())
    _values = v._values;

  else
    libmesh_error_msg("v.local_size() = " << v.local_size() << " must be equal to this->local_size() = " << this->local_size());

  return *this;
}
371 :
372 :
373 :
374 : template <typename T>
375 : NumericVector<T> &
376 0 : DistributedVector<T>::operator = (const std::vector<T> & v)
377 : {
378 0 : libmesh_assert (this->initialized());
379 0 : libmesh_assert_equal_to (_values.size(), _local_size);
380 0 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
381 :
382 0 : if (v.size() == local_size())
383 0 : _values = v;
384 :
385 0 : else if (v.size() == size())
386 0 : for (auto i : index_range(*this))
387 0 : _values[i-first_local_index()] = v[i];
388 :
389 : else
390 0 : libmesh_error_msg("Incompatible sizes in DistributedVector::operator=");
391 :
392 0 : return *this;
393 : }
394 :
395 :
396 :
// Assemble the vector: ship any entries that were set/added for remote
// indices to their owning processors, apply them there, and mark the
// vector closed.  Collective operation -- all processors must call it.
template <typename T>
inline
void DistributedVector<T>::close ()
{
  libmesh_assert (this->initialized());

  parallel_object_only();

  // Communication is only needed if *some* rank staged remote values.
  bool have_remote_values = !_remote_values.empty();
  this->comm().max(have_remote_values);

  if (have_remote_values)
    {
      // Agree globally on whether staged values overwrite (SET_VALUES)
      // or accumulate (ADD_VALUES) at their destination.
      bool someone_is_setting = (_unclosed_state == SET_VALUES);
      this->comm().max(someone_is_setting);

#ifndef NDEBUG
      // If *I* have remote values, I must know what to do with them.
      if (!_remote_values.empty())
        libmesh_assert(_unclosed_state == SET_VALUES ||
                       _unclosed_state == ADD_VALUES);

      // If *anyone* had remote values, we must be doing something,
      // and all ranks must agree on what we're doing.
      bool someone_is_adding = (_unclosed_state == ADD_VALUES);
      this->comm().max(someone_is_adding);

      libmesh_assert_not_equal_to(someone_is_setting, someone_is_adding);
#endif

      // We want to traverse in id order later, but we can't compare
      // values with default < in the case where Number==complex.
      std::sort(_remote_values.begin(), _remote_values.end(),
                [](auto a, auto b)
                { return a.first < b.first; });

      // Gather every rank's end-of-range index so we can map a global
      // index to its owning processor.
      std::vector<numeric_index_type> last_local_indices;
      this->comm().allgather(_last_local_index, last_local_indices);

      std::map<processor_id_type, std::vector<std::pair<numeric_index_type,T>>>
        updates_to_send;

      // Bucket the (sorted) staged values by destination rank.  Because
      // the ids are sorted, the owner pointer p only moves forward.
      processor_id_type p = 0;
      for (auto [i, v] : _remote_values)
        {
          while (i >= last_local_indices[p])
            {
              libmesh_assert_less(p, this->n_processors());
              ++p;
            }
          updates_to_send[p].emplace_back(i, v);
        }
      _remote_values.clear();

      // Applied on the receiving side: write (or accumulate) each
      // incoming value into this rank's local storage.
      auto action_functor =
        [this, someone_is_setting]
        (processor_id_type,
         const std::vector<std::pair<numeric_index_type,T>> & incoming_values)
        {
          for (auto [i, v] : incoming_values)
            {
              libmesh_assert_greater_equal(i, _first_local_index);
              libmesh_assert_less(i, _last_local_index);
              if (someone_is_setting)
                _values[i-_first_local_index] = v;
              else
                _values[i-_first_local_index] += v;
            }
        };

      Parallel::push_parallel_vector_data
        (this->comm(), updates_to_send, action_functor);

      _unclosed_state = DO_NOTHING;
    }

  this->_is_closed = true;
}
475 :
476 :
477 :
478 : template <typename T>
479 0 : void DistributedVector<T>::localize (NumericVector<T> & v_local_in) const
480 :
481 : {
482 0 : libmesh_assert (this->initialized());
483 0 : libmesh_assert_equal_to (_values.size(), _local_size);
484 0 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
485 :
486 0 : DistributedVector<T> * v_local = cast_ptr<DistributedVector<T> *>(&v_local_in);
487 :
488 0 : v_local->_first_local_index = 0;
489 :
490 0 : v_local->_global_size =
491 0 : v_local->_local_size =
492 0 : v_local->_last_local_index = size();
493 :
494 0 : v_local->_is_initialized =
495 0 : v_local->_is_closed = true;
496 :
497 : // Call localize on the vector's values. This will help
498 : // prevent code duplication
499 0 : localize (v_local->_values);
500 :
501 : #ifndef LIBMESH_HAVE_MPI
502 :
503 : libmesh_assert_equal_to (local_size(), size());
504 :
505 : #endif
506 0 : }
507 :
508 :
509 :
// Localize *this into v_local_in; the send-list argument (which callers
// use to request only a subset of entries) is currently ignored, so the
// whole vector is localized instead.
template <typename T>
void DistributedVector<T>::localize (NumericVector<T> & v_local_in,
                                     const std::vector<numeric_index_type> &) const
{
  libmesh_assert (this->initialized());
  libmesh_assert_equal_to (_values.size(), _local_size);
  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);

  // TODO: We don't yet support the send list; this is inefficient:
  localize (v_local_in);
}
521 :
522 :
523 :
// Fetch the entries named in `indices` (global numbering) from whichever
// processors own them, writing the results into v_local in the same order
// as `indices`.  Collective operation.
template <typename T>
void DistributedVector<T>::localize (std::vector<T> & v_local,
                                     const std::vector<numeric_index_type> & indices) const
{
  // Resize v_local so there is enough room to hold all the local values.
  v_local.resize(indices.size());

  // We need to know who has the values we want, so get everyone's _local_size
  std::vector<numeric_index_type> local_sizes;
  this->comm().allgather (_local_size, local_sizes);

  // Make a vector of partial sums of local sizes
  std::vector<numeric_index_type> local_size_sums(this->n_processors());
  local_size_sums[0] = local_sizes[0];
  for (auto i : IntRange<numeric_index_type>(1, local_sizes.size()))
    local_size_sums[i] = local_size_sums[i-1] + local_sizes[i];

  // We now fill in 'requested_ids' based on the indices. Also keep
  // track of the local index (in the indices vector) for each of
  // these, since we need that when unpacking.
  std::map<processor_id_type, std::vector<numeric_index_type>>
    requested_ids, local_requested_ids;

  // We'll use this typedef a couple of times below.
  typedef typename std::vector<numeric_index_type>::iterator iter_t;

  // For each index in indices, determine which processor it is on.
  // This is an O(N*log(p)) algorithm that uses std::upper_bound().
  // Note: upper_bound() returns an iterator to the first entry which is
  // greater than the given value.
  for (auto i : index_range(indices))
    {
      iter_t ub = std::upper_bound(local_size_sums.begin(),
                                   local_size_sums.end(),
                                   indices[i]);

      processor_id_type on_proc = cast_int<processor_id_type>
        (std::distance(local_size_sums.begin(), ub));

      requested_ids[on_proc].push_back(indices[i]);
      local_requested_ids[on_proc].push_back(i);
    }

  // Run on the *owning* processor: translate each requested global id
  // into local numbering and copy out the corresponding value.
  auto gather_functor =
    [this]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     std::vector<T> & values)
    {
      // The first send/receive we did was for indices, the second one will be
      // for corresponding floating point values, so create storage for that now...
      const std::size_t ids_size = ids.size();
      values.resize(ids_size);

      for (std::size_t i=0; i != ids_size; i++)
        {
          // The index of the requested value
          const numeric_index_type requested_index = ids[i];

          // Transform into local numbering, and get requested value.
          values[i] = _values[requested_index - _first_local_index];
        }
    };

  // Run on the *requesting* processor: scatter the returned values into
  // v_local at the positions recorded in local_requested_ids.
  auto action_functor =
    [& v_local, & local_requested_ids]
    (processor_id_type pid,
     const std::vector<dof_id_type> &,
     const std::vector<T> & values)
    {
      // Now write the received values to the appropriate place(s) in v_local
      for (auto i : index_range(values))
        {
          libmesh_assert(local_requested_ids.count(pid));
          libmesh_assert_less(i, local_requested_ids[pid].size());

          // Get the index in v_local where this value needs to be inserted.
          const numeric_index_type local_requested_index =
            local_requested_ids[pid][i];

          // Actually set the value in v_local
          v_local[local_requested_index] = values[i];
        }
    };

  // Tag pointer used only to communicate the datatype T to TIMPI.
  const T * ex = nullptr;
  Parallel::pull_parallel_vector_data
    (this->comm(), requested_ids, gather_functor, action_functor, ex);
}
612 :
613 :
614 :
615 : template <typename T>
616 0 : void DistributedVector<T>::localize (const numeric_index_type first_local_idx,
617 : const numeric_index_type last_local_idx,
618 : const std::vector<numeric_index_type> & send_list)
619 : {
620 : // Only good for serial vectors
621 0 : libmesh_assert_equal_to (this->size(), this->local_size());
622 0 : libmesh_assert_greater (last_local_idx, first_local_idx);
623 0 : libmesh_assert_less_equal (send_list.size(), this->size());
624 0 : libmesh_assert_less (last_local_idx, this->size());
625 :
626 0 : const numeric_index_type my_size = this->size();
627 0 : const numeric_index_type my_local_size = (last_local_idx - first_local_idx + 1);
628 :
629 : // Don't bother for serial cases
630 0 : if ((first_local_idx == 0) &&
631 0 : (my_local_size == my_size))
632 0 : return;
633 :
634 :
635 : // Build a parallel vector, initialize it with the local
636 : // parts of (*this)
637 0 : DistributedVector<T> parallel_vec(this->comm());
638 :
639 0 : parallel_vec.init (my_size, my_local_size, true, PARALLEL);
640 :
641 : // Copy part of *this into the parallel_vec
642 0 : for (numeric_index_type i=first_local_idx; i<=last_local_idx; i++)
643 0 : parallel_vec._values[i-first_local_idx] = _values[i];
644 :
645 : // localize like normal
646 0 : parallel_vec.localize (*this, send_list);
647 0 : }
648 :
649 :
650 :
651 : template <typename T>
652 284 : void DistributedVector<T>::localize (std::vector<T> & v_local) const
653 : {
654 : // This function must be run on all processors at once
655 8 : parallel_object_only();
656 :
657 8 : libmesh_assert (this->initialized());
658 8 : libmesh_assert_equal_to (_values.size(), _local_size);
659 8 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
660 :
661 284 : v_local = this->_values;
662 :
663 284 : this->comm().allgather (v_local);
664 :
665 : #ifndef LIBMESH_HAVE_MPI
666 : libmesh_assert_equal_to (local_size(), size());
667 : #endif
668 284 : }
669 :
670 :
671 :
672 : template <typename T>
673 284 : void DistributedVector<T>::localize_to_one (std::vector<T> & v_local,
674 : const processor_id_type pid) const
675 : {
676 : // This function must be run on all processors at once
677 8 : parallel_object_only();
678 :
679 8 : libmesh_assert (this->initialized());
680 8 : libmesh_assert_equal_to (_values.size(), _local_size);
681 8 : libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
682 :
683 284 : v_local = this->_values;
684 :
685 284 : this->comm().gather (pid, v_local);
686 :
687 : #ifndef LIBMESH_HAVE_MPI
688 : libmesh_assert_equal_to (local_size(), size());
689 : #endif
690 284 : }
691 :
692 :
693 :
694 : template <typename T>
695 0 : void DistributedVector<T>::pointwise_mult (const NumericVector<T> &,
696 : const NumericVector<T> &)
697 : //void DistributedVector<T>::pointwise_mult (const NumericVector<T> & vec1,
698 : // const NumericVector<T> & vec2)
699 : {
700 0 : libmesh_not_implemented();
701 : }
702 :
703 : template <typename T>
704 0 : void DistributedVector<T>::pointwise_divide (const NumericVector<T> &,
705 : const NumericVector<T> &)
706 : {
707 0 : libmesh_not_implemented();
708 : }
709 :
710 : //--------------------------------------------------------------
711 : // Explicit instantiations
712 : template class LIBMESH_EXPORT DistributedVector<Number>;
713 :
714 : } // namespace libMesh
|