if (!Internal_values)
{
  // If the data is not owned by this vector it cannot be redistributed
  std::ostringstream error_message;
  error_message
    << "This multi vector does not own its data (i.e. data has been "
    << "passed in via set_external_values() and therefore "
    << "cannot be redistributed";
  throw OomphLibError(error_message.str(),
                      OOMPH_CURRENT_FUNCTION,
                      OOMPH_EXCEPTION_LOCATION);
}
// Paranoid check that the number of global rows is the same in both
// distributions
if (dist_pt->nrow() != this->nrow())
{
  std::ostringstream error_message;
  error_message << "The number of global rows in the new distribution ("
                << dist_pt->nrow() << ") is not equal to the number"
                << " of global rows in the current distribution ("
                << this->nrow() << ").\n";
  throw OomphLibError(error_message.str(),
                      OOMPH_CURRENT_FUNCTION,
                      OOMPH_EXCEPTION_LOCATION);
}
// Paranoid check that the new distribution and the current distribution
// use the same communicator
if (!(*dist_pt->communicator_pt() ==
      *this->distribution_pt()->communicator_pt()))
{
  std::ostringstream error_message;
  error_message << "The new distribution and the current distribution must "
                << "have the same communicator.";
  throw OomphLibError(error_message.str(),
                      OOMPH_CURRENT_FUNCTION,
                      OOMPH_EXCEPTION_LOCATION);
}
// The number of constituent vectors in this multi vector
const unsigned n_vector = this->Nvector;
// Both the current and the new distribution are distributed: tabulate,
// for every processor i, the first row and the number of local rows under
// the current distribution (current_first_row_data, current_nrow_local_data)
// and under the new one (new_first_row_data, new_nrow_local_data)
for (int i = 0; i < nproc; i++)
{
  // ...
}
// For every processor p, work out which of this processor's current rows
// must be sent to p, and which rows this processor will receive from p,
// under the new distribution
for (int p = 0; p < nproc; p++)
{
  // Does the block held here now overlap with the block p will hold?
  if ((new_first_row_data[p] < (current_first_row_data[my_rank] +
                                current_nrow_local_data[my_rank])) &&
      (current_first_row_data[my_rank] <
       (new_first_row_data[p] + new_nrow_local_data[p])))
  {
    new_first_row_for_proc[p] =
      std::max(current_first_row_data[my_rank], new_first_row_data[p]);
    new_nrow_local_for_proc[p] =
      std::min((current_first_row_data[my_rank] +
                current_nrow_local_data[my_rank]),
               (new_first_row_data[p] + new_nrow_local_data[p])) -
      new_first_row_for_proc[p];
  }

  // Does the block p holds now overlap with the block this processor
  // will hold?
  if ((new_first_row_data[my_rank] <
       (current_first_row_data[p] + current_nrow_local_data[p])) &&
      (current_first_row_data[p] <
       (new_first_row_data[my_rank] + new_nrow_local_data[my_rank])))
  {
    new_first_row_from_proc[p] =
      std::max(current_first_row_data[p], new_first_row_data[my_rank]);
    new_nrow_local_from_proc[p] =
      std::min((current_first_row_data[p] + current_nrow_local_data[p]),
               (new_first_row_data[my_rank] +
                new_nrow_local_data[my_rank])) -
      new_first_row_from_proc[p];
  }
}
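The two overlap tests above are the standard intersection of half-open row ranges: blocks [a, a+n) and [b, b+m) intersect exactly when b < a+n and a < b+m, and the shared rows then start at max(a, b) and span min(a+n, b+m) - max(a, b) entries. A minimal stand-alone sketch of that arithmetic (the helper name and struct are illustrative, not part of oomph-lib):

#include <algorithm>

// Hypothetical helper: intersection of the half-open row ranges
// [a, a + n) and [b, b + m); a returned length of 0 means "no overlap".
struct RowRange { unsigned first; unsigned n; };

RowRange overlap(unsigned a, unsigned n, unsigned b, unsigned m)
{
  if (b < a + n && a < b + m)                  // the ranges intersect
  {
    unsigned first = std::max(a, b);           // first shared row
    unsigned last  = std::min(a + n, b + m);   // one past the last shared row
    return {first, last - first};
  }
  return {0, 0};
}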
// Temporary storage for this processor's rows under the new distribution:
// one contiguous block, with temp_data[v] pointing at vector v's slice
double** temp_data = new double*[n_vector];
double* contiguous_temp_data =
  new double[n_vector * new_nrow_local_data[my_rank]];
for (unsigned v = 0; v < n_vector; ++v)
{
  temp_data[v] = &contiguous_temp_data[v * new_nrow_local_data[my_rank]];
}
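This storage mirrors the layout of Values itself: a single contiguous allocation of n_vector * nrow_local doubles, with temp_data[v] pointing at the start of vector v's slice, so each constituent vector can still be handed to MPI as one flat buffer. A stand-alone sketch of the allocation pattern and its matching clean-up (helper names are illustrative):

// Allocate n_vec row pointers backed by one contiguous block, so that
// block[v] can be passed to MPI as a flat buffer of n_local doubles.
double** allocate_block(unsigned n_vec, unsigned n_local)
{
  double** block = new double*[n_vec];
  double* storage = new double[n_vec * n_local];
  for (unsigned v = 0; v < n_vec; ++v)
  {
    block[v] = &storage[v * n_local];
  }
  return block;
}

// Matching clean-up: the contiguous storage is owned via block[0].
void free_block(double** block)
{
  delete[] block[0];
  delete[] block;
}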
// Copy the rows that this processor keeps (i.e. "sends" to itself)
// directly from Values into the temporary storage
if (new_nrow_local_for_proc[my_rank] != 0)
{
  unsigned j =
    new_first_row_for_proc[my_rank] - current_first_row_data[my_rank];
  unsigned k =
    new_first_row_for_proc[my_rank] - new_first_row_data[my_rank];
  for (unsigned i = 0; i < new_nrow_local_for_proc[my_rank]; i++)
  {
    for (unsigned v = 0; v < n_vector; ++v)
    {
      temp_data[v][k + i] = Values[v][j + i];
    }
  }
}
// Exchange the remaining rows with the other processors: at step p send
// to (my_rank + p) % nproc and receive from (my_rank - p + nproc) % nproc
for (int p = 1; p < nproc; p++)
{
  // The processor to which data will be sent
  unsigned dest_p = (my_rank + p) % nproc;

  // The processor from which data will be received
  unsigned source_p = (nproc + my_rank - p) % nproc;

  // Send and receive the overlapping rows of every constituent vector
  MPI_Status status;
  for (unsigned v = 0; v < n_vector; v++)
  {
    MPI_Sendrecv(Values[v] + new_first_row_for_proc[dest_p] -
                   current_first_row_data[my_rank],
                 new_nrow_local_for_proc[dest_p],
                 MPI_DOUBLE, dest_p, 1,
                 temp_data[v] + new_first_row_from_proc[source_p] -
                   new_first_row_data[my_rank],
                 new_nrow_local_from_proc[source_p],
                 MPI_DOUBLE, source_p, 1,
                 this->distribution_pt()->communicator_pt()->mpi_comm(),
                 &status);
  }
}
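At step p of this loop every processor sends to (my_rank + p) % nproc while receiving from (my_rank - p + nproc) % nproc, so each posted send is matched by the corresponding receive and the whole exchange completes in nproc - 1 combined send/receive steps. A minimal sketch of the same schedule with a single double per message (buffer and function names are illustrative):

#include <mpi.h>

// Ring-style exchange: at step p, rank r sends to (r + p) % nproc and
// receives from (r - p + nproc) % nproc, mirroring the loop above.
void ring_exchange(MPI_Comm comm, double my_value)
{
  int my_rank = 0, nproc = 1;
  MPI_Comm_rank(comm, &my_rank);
  MPI_Comm_size(comm, &nproc);
  for (int p = 1; p < nproc; p++)
  {
    int dest_p = (my_rank + p) % nproc;
    int source_p = (nproc + my_rank - p) % nproc;
    double received = 0.0;
    MPI_Status status;
    MPI_Sendrecv(&my_value, 1, MPI_DOUBLE, dest_p, 0,
                 &received, 1, MPI_DOUBLE, source_p, 0,
                 comm, &status);
    // ... use the value received from source_p ...
  }
}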
// When the new distribution is not distributed: first copy this
// processor's current local values (n_local_data = this->nrow_local())
// into contiguous temporary storage
double** temp_data = new double*[n_vector];
double* contiguous_temp_data = new double[n_vector * n_local_data];
for (unsigned v = 0; v < n_vector; ++v)
{
  temp_data[v] = &contiguous_temp_data[v * n_local_data];
  for (unsigned i = 0; i < n_local_data; i++)
  {
    temp_data[v][i] = Values[v][i];
  }
}
// Resize the storage so that each of the n_vector vectors can hold all
// of the global rows
double* values = new double[this->nrow() * n_vector];
for (unsigned v = 0; v < n_vector; v++)
{
  Values[v] = &values[v * this->nrow()];
}

// First row and number of local rows on every processor, in the int
// form required by MPI_Allgatherv
int* dist_first_row = new int[nproc];
int* dist_nrow_local = new int[nproc];
for (int p = 0; p < nproc; p++)
{
  // ...
}
// Gather every processor's local block into the full-length vectors so
// that each processor ends up with a complete copy
for (unsigned v = 0; v < n_vector; v++)
{
  MPI_Allgatherv(temp_data[v], my_local_data, MPI_DOUBLE,
                 Values[v], dist_nrow_local, dist_first_row, MPI_DOUBLE,
                 this->distribution_pt()->communicator_pt()->mpi_comm());
}
delete[] temp_data[0];
delete[] temp_data;
delete[] dist_first_row;
delete[] dist_nrow_local;
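MPI_Allgatherv assembles the variable-sized local blocks into one full-length array on every processor: dist_nrow_local supplies the per-processor block lengths (recvcounts) and dist_first_row the offsets (displs) at which each block is placed in the output. A minimal sketch of the call in isolation (names are illustrative):

#include <mpi.h>

// Gather n_local doubles from every rank into the replicated array
// 'global' (whose length is the sum of nrow_local over all ranks).
void gather_all(MPI_Comm comm,
                double* local, int n_local,
                double* global,
                int* nrow_local,   // per-rank block sizes (recvcounts)
                int* first_row)    // per-rank offsets into 'global' (displs)
{
  MPI_Allgatherv(local, n_local, MPI_DOUBLE,
                 global, nrow_local, first_row, MPI_DOUBLE, comm);
}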
// When the vector is not currently distributed but the new distribution
// is: keep only the block of rows [first_row, first_row + n_local_data)
// that this processor owns under the new distribution
double** temp_data = new double*[n_vector];
double* contiguous_temp_data = new double[n_vector * n_local_data];
for (unsigned v = 0; v < n_vector; v++)
{
  temp_data[v] = &contiguous_temp_data[v * n_local_data];
  for (unsigned i = 0; i < n_local_data; i++)
  {
    temp_data[v][i] = Values[v][first_row + i];
  }
}
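From the caller's side, all of this row shuffling is internal: one only builds the target LinearAlgebraDistribution and passes it to redistribute(). A usage sketch, assuming oomph-lib's usual LinearAlgebraDistribution constructor taking a communicator pointer, the number of global rows and a distributed flag:

// Assumes an existing, fully built DoubleMultiVector v.
// Build a uniformly distributed target distribution over the same
// communicator and the same number of global rows, then redistribute.
oomph::OomphCommunicator* comm_pt = v.distribution_pt()->communicator_pt();
oomph::LinearAlgebraDistribution new_dist(comm_pt, v.nrow(), true);
v.redistribute(&new_dist);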
unsigned Nvector
The number of vectors.
void redistribute(const LinearAlgebraDistribution *const &dist_pt)
The contents of the vector are redistributed to match the new distribution.
set_external_values(...)
Allows external data to be used by this vector. WARNING: The size of the external data must corre...
double **values()
Access function to the underlying values.
OomphCommunicator *communicator_pt() const
Const access to the communicator pointer.
unsigned nrow() const
Access function to the number of global rows.
unsigned first_row() const
Access function for the first row on this processor. If not distributed then this is just zero...
bool distributed() const
Indicates whether the distribution is serial or distributed.
void setup_doublevector_representation()
Sets up the DoubleVector representation of this multi vector.
LinearAlgebraDistribution
Describes the distribution of a distributable linear algebra type object. Typically this is a contain...
unsigned nrow_local() const
Access function for the number of local rows on this processor. If no MPI then Nrow is returned...
bool Internal_values
Boolean flag to indicate whether the vector's data (values_pt) is owned by this vector.
void build_distribution(const LinearAlgebraDistribution *const dist_pt)
Sets up the distribution of this distributable linear algebra object.
LinearAlgebraDistribution *distribution_pt() const
Access to the LinearAlgebraDistribution.
OomphCommunicator
An oomph-lib wrapper to the MPI_Comm communicator object. Just contains an MPI_Comm object (which is ...