double_multi_vector.cc

//LIC// ====================================================================
//LIC// This file forms part of oomph-lib, the object-oriented,
//LIC// multi-physics finite-element library, available
//LIC// at http://www.oomph-lib.org.
//LIC//
//LIC// Version 1.0; svn revision $LastChangedRevision$
//LIC//
//LIC// $LastChangedDate$
//LIC//
//LIC// Copyright (C) 2006-2016 Matthias Heil and Andrew Hazel
//LIC//
//LIC// This library is free software; you can redistribute it and/or
//LIC// modify it under the terms of the GNU Lesser General Public
//LIC// License as published by the Free Software Foundation; either
//LIC// version 2.1 of the License, or (at your option) any later version.
//LIC//
//LIC// This library is distributed in the hope that it will be useful,
//LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of
//LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
//LIC// Lesser General Public License for more details.
//LIC//
//LIC// You should have received a copy of the GNU Lesser General Public
//LIC// License along with this library; if not, write to the Free Software
//LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
//LIC// 02110-1301 USA.
//LIC//
//LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk.
//LIC//
//LIC//====================================================================
#include "double_multi_vector.h"
#include "matrices.h"

namespace oomph
{
  /// The contents of the vector are redistributed to match the new
  /// distribution. In a non-MPI build this method works, but does nothing.
  /// \b NOTE 1: The current distribution and the new distribution must have
  /// the same number of global rows.
  /// \b NOTE 2: The current distribution and the new distribution must have
  /// the same Communicator.
  void DoubleMultiVector::redistribute(
    const LinearAlgebraDistribution* const& dist_pt)
  {
#ifdef OOMPH_HAS_MPI
#ifdef PARANOID
    if (!Internal_values)
    {
      // If this vector does not own the double* values then it cannot be
      // redistributed.
      // Note: this is not strictly necessary; we would just need to be
      // careful with the delete[] below.
      std::ostringstream error_message;
      error_message
        << "This multi vector does not own its data (i.e. data has been "
        << "passed in via set_external_values()) and therefore "
        << "cannot be redistributed.";
      throw OomphLibError(error_message.str(),
                          OOMPH_CURRENT_FUNCTION,
                          OOMPH_EXCEPTION_LOCATION);
    }
    // paranoid check that the numbers of global rows in the two
    // distributions are the same
    if (dist_pt->nrow() != this->nrow())
    {
      std::ostringstream error_message;
      error_message << "The number of global rows in the new distribution ("
                    << dist_pt->nrow() << ") is not equal to the number"
                    << " of global rows in the current distribution ("
                    << this->nrow() << ").\n";
      throw OomphLibError(error_message.str(),
                          OOMPH_CURRENT_FUNCTION,
                          OOMPH_EXCEPTION_LOCATION);
    }
    // paranoid check that the current distribution and the new distribution
    // have the same Communicator
    OomphCommunicator temp_comm(*dist_pt->communicator_pt());
    if (!(temp_comm == *this->distribution_pt()->communicator_pt()))
    {
      std::ostringstream error_message;
      error_message << "The new distribution and the current distribution "
                    << "must have the same communicator.";
      throw OomphLibError(error_message.str(),
                          OOMPH_CURRENT_FUNCTION,
                          OOMPH_EXCEPTION_LOCATION);
    }
#endif

    // check the distributions are not the same
    if (!((*this->distribution_pt()) == *dist_pt))
    {
      // Cache the number of vectors
      const unsigned n_vector = this->Nvector;

      // get the rank and the number of processors
      int my_rank = this->distribution_pt()->communicator_pt()->my_rank();
      int nproc = this->distribution_pt()->communicator_pt()->nproc();

      // if both vectors are distributed
      if (this->distributed() && dist_pt->distributed())
      {
        // new and current nrow_local and first_row data
        Vector<unsigned> new_first_row_data(nproc);
        Vector<unsigned> new_nrow_local_data(nproc);
        Vector<unsigned> current_first_row_data(nproc);
        Vector<unsigned> current_nrow_local_data(nproc);
        for (int i = 0; i < nproc; i++)
        {
          new_first_row_data[i] = dist_pt->first_row(i);
          new_nrow_local_data[i] = dist_pt->nrow_local(i);
          current_first_row_data[i] = this->first_row(i);
          current_nrow_local_data[i] = this->nrow_local(i);
        }

        // compute which local rows are expected to be received from each
        // processor / sent to each processor
        Vector<unsigned> new_first_row_for_proc(nproc);
        Vector<unsigned> new_nrow_local_for_proc(nproc);
        Vector<unsigned> new_first_row_from_proc(nproc);
        Vector<unsigned> new_nrow_local_from_proc(nproc);

        // for every processor compute the first_row and nrow_local that
        // will be sent and received by this processor
        for (int p = 0; p < nproc; p++)
        {
          // start with data to be sent
          if ((new_first_row_data[p] <
               (current_first_row_data[my_rank] +
                current_nrow_local_data[my_rank])) &&
              (current_first_row_data[my_rank] <
               (new_first_row_data[p] + new_nrow_local_data[p])))
          {
            new_first_row_for_proc[p] =
              std::max(current_first_row_data[my_rank], new_first_row_data[p]);
            new_nrow_local_for_proc[p] =
              std::min((current_first_row_data[my_rank] +
                        current_nrow_local_data[my_rank]),
                       (new_first_row_data[p] + new_nrow_local_data[p])) -
              new_first_row_for_proc[p];
          }

          // and data to be received
          if ((new_first_row_data[my_rank] <
               (current_first_row_data[p] + current_nrow_local_data[p])) &&
              (current_first_row_data[p] <
               (new_first_row_data[my_rank] + new_nrow_local_data[my_rank])))
          {
            new_first_row_from_proc[p] =
              std::max(current_first_row_data[p], new_first_row_data[my_rank]);
            new_nrow_local_from_proc[p] =
              std::min((current_first_row_data[p] +
                        current_nrow_local_data[p]),
                       (new_first_row_data[my_rank] +
                        new_nrow_local_data[my_rank])) -
              new_first_row_from_proc[p];
          }
        }
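
        // Example of the interval arithmetic above (illustrative numbers,
        // not from the library): with nproc=2 and 10 global rows, suppose
        // this processor (rank 0) currently holds rows [0,5) and the new
        // distribution gives rank 1 rows [3,10). The two half-open
        // intervals overlap in [3,5), so rank 0 sends first_row=3,
        // nrow_local=2 to rank 1.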

        // Storage for the new data
        double** temp_data = new double*[n_vector];
        double* contiguous_temp_data =
          new double[n_vector * new_nrow_local_data[my_rank]];
        for (unsigned v = 0; v < n_vector; ++v)
        {
          temp_data[v] =
            &contiguous_temp_data[v * new_nrow_local_data[my_rank]];
        }
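
        // (The n_vector row blocks share one contiguous allocation, so
        // freeing temp_data[0] later releases the data for all vectors;
        // each temp_data[v] is just a view into that single buffer.)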

        // "send to self": copy data that does not need to be sent elsewhere
        // straight into temp_data
        if (new_nrow_local_for_proc[my_rank] != 0)
        {
          unsigned j =
            new_first_row_for_proc[my_rank] - current_first_row_data[my_rank];
          unsigned k =
            new_first_row_for_proc[my_rank] - new_first_row_data[my_rank];
          for (unsigned i = 0; i < new_nrow_local_for_proc[my_rank]; i++)
          {
            for (unsigned v = 0; v < n_vector; ++v)
            {
              temp_data[v][k + i] = Values[v][j + i];
            }
          }
        }
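
        // (Rows that stay on this processor are copied directly rather than
        // routed through the MPI_Sendrecv exchange below; sending a message
        // to oneself would work but is unnecessary overhead.)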

        // send and receive circularly
        for (int p = 1; p < nproc; p++)
        {
          // next processor to send to
          unsigned dest_p = (my_rank + p) % nproc;

          // next processor to receive from
          unsigned source_p = (nproc + my_rank - p) % nproc;

          // send and receive the values
          MPI_Status status;
          for (unsigned v = 0; v < n_vector; v++)
          {
            MPI_Sendrecv(Values[v] + new_first_row_for_proc[dest_p] -
                           current_first_row_data[my_rank],
                         new_nrow_local_for_proc[dest_p],
                         MPI_DOUBLE, dest_p, 1,
                         temp_data[v] + new_first_row_from_proc[source_p] -
                           new_first_row_data[my_rank],
                         new_nrow_local_from_proc[source_p],
                         MPI_DOUBLE, source_p, 1,
                         this->distribution_pt()->communicator_pt()->mpi_comm(),
                         &status);
          }
        }
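
        // (At step p every rank sends to (my_rank + p) % nproc while
        // receiving from (my_rank - p + nproc) % nproc, so each send is
        // matched by the corresponding receive on the partner rank and the
        // blocking MPI_Sendrecv calls cannot deadlock.)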

        // copy from temp_data to Values
        delete[] Values[0];
        delete[] Values;
        Values = temp_data;
      }
      // if this vector is distributed but the new distribution is global
      else if (this->distributed() && !dist_pt->distributed())
      {
        // copy the existing Values to temp_data
        unsigned n_local_data = this->nrow_local();
        double** temp_data = new double*[n_vector];
        // new contiguous data
        double* contiguous_temp_data = new double[n_vector * n_local_data];
        for (unsigned v = 0; v < n_vector; ++v)
        {
          temp_data[v] = &contiguous_temp_data[v * n_local_data];
          for (unsigned i = 0; i < n_local_data; i++)
          {
            temp_data[v][i] = Values[v][i];
          }
        }

        // clear and resize the storage underlying Values
        delete[] Values[0];
        double* values = new double[this->nrow() * n_vector];
        for (unsigned v = 0; v < n_vector; v++)
        {
          Values[v] = &values[v * this->nrow()];
        }

        // create int arrays of the first rows and local row counts
        int* dist_first_row = new int[nproc];
        int* dist_nrow_local = new int[nproc];
        for (int p = 0; p < nproc; p++)
        {
          dist_first_row[p] = this->first_row(p);
          dist_nrow_local[p] = this->nrow_local(p);
        }

        // gather the local vectors from all processors on all processors
        int my_local_data(this->nrow_local());

        // loop over all vectors
        for (unsigned v = 0; v < n_vector; v++)
        {
          MPI_Allgatherv(temp_data[v], my_local_data, MPI_DOUBLE,
                         Values[v], dist_nrow_local, dist_first_row,
                         MPI_DOUBLE,
                         this->distribution_pt()->communicator_pt()->mpi_comm());
        }
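
        // (MPI_Allgatherv concatenates each rank's local block into the
        // full global vector on every processor; dist_nrow_local supplies
        // the per-rank receive counts and dist_first_row the displacements,
        // which is why they were converted to int arrays above.)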

        // update the distribution
        this->build_distribution(dist_pt);

        // delete the temp_data
        delete[] temp_data[0];
        delete[] temp_data;

        // clean up
        delete[] dist_first_row;
        delete[] dist_nrow_local;
      }

      // if this vector is not distributed but the target vector is
      else if (!this->distributed() && dist_pt->distributed())
      {
        // cache the new nrow_local
        unsigned nrow_local = dist_pt->nrow_local();

        // and the new first_row
        unsigned first_row = dist_pt->first_row();

        const unsigned n_local_data = nrow_local;
        double** temp_data = new double*[n_vector];
        double* contiguous_temp_data = new double[n_vector * n_local_data];

        // copy this processor's slice of the data
        for (unsigned v = 0; v < n_vector; v++)
        {
          temp_data[v] = &contiguous_temp_data[v * n_local_data];
          for (unsigned i = 0; i < n_local_data; i++)
          {
            temp_data[v][i] = Values[v][first_row + i];
          }
        }
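
        // (No communication is needed in this branch: every processor
        // already holds the full global vector, so each one simply keeps
        // the [first_row, first_row + nrow_local) slice that the new
        // distribution assigns to it.)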

        // copy to Values
        delete[] Values[0];
        delete[] Values;
        Values = temp_data;

        // update the distribution
        this->build_distribution(dist_pt);
      }

      // copy the Distribution
      this->build_distribution(dist_pt);
    }
#endif

    // Update the doublevector representation
    setup_doublevector_representation();
  }

} // namespace oomph
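
// ----------------------------------------------------------------------
// A minimal usage sketch of redistribute() (illustrative only, not part
// of the library build): the function name example_redistribute() and
// the row/vector counts are assumptions, and the constructors are used
// in their usual LinearAlgebraDistribution / DoubleMultiVector forms.
// Requires oomph-lib built with MPI support.
// ----------------------------------------------------------------------
#ifdef OOMPH_HAS_MPI
void example_redistribute(oomph::OomphCommunicator* comm_pt)
{
  using namespace oomph;

  // 100 global rows, uniformly distributed over the processors
  LinearAlgebraDistribution distributed_dist(comm_pt, 100, true);

  // A multi vector holding 4 vectors, initialised to zero
  DoubleMultiVector v(4, distributed_dist, 0.0);

  // Target distribution: the same 100 rows, replicated on every
  // processor (i.e. not distributed)
  LinearAlgebraDistribution global_dist(comm_pt, 100, false);

  // Same nrow and same communicator, so this is legal; the contents
  // are gathered onto every processor
  v.redistribute(&global_dist);
}
#endif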