//============================================================================
/// Constructor: set up the information needed to communicate halo data
/// between all processors. Requires two "all to all" communications.
/// Arguments are the distribution of the DoubleVector and a Vector of
/// global unknowns required on this processor.
//============================================================================
DoubleVectorHaloScheme::DoubleVectorHaloScheme(
 LinearAlgebraDistribution* const &dist_pt,
 const Vector<unsigned> &required_global_eqn)
{
 //Temporary maps, keyed on processor rank: the local indices requested
 //from each other processor...
 std::map<unsigned,Vector<unsigned> > to_be_haloed;
 //...and the corresponding indices in the additional (halo) storage
 std::map<unsigned,Vector<unsigned> > halo_entries;

 //Rank of the current processor
 const unsigned my_rank =
  static_cast<unsigned>(dist_pt->communicator_pt()->my_rank());

 //Loop over the required global equations; those not stored locally
 //become halo data
 const unsigned n_global_eqn = required_global_eqn.size();
 for(unsigned n=0;n<n_global_eqn;n++) {
  const unsigned i_global = required_global_eqn[n];
  const unsigned rank_of_global = dist_pt->rank_of_global_row(i_global);
  if(my_rank!=rank_of_global) {
   //Local entry on the processor that stores the value
   unsigned i_local = i_global - dist_pt->first_row(rank_of_global);
   //Record the translation from global equation to halo-storage index
   const unsigned index = Local_index.size();
   Local_index[i_global] = index;
   halo_entries[rank_of_global].push_back(index);
   to_be_haloed[rank_of_global].push_back(i_local);
  }
 }

 //Tell the other processors which of their data are haloed here
 const int n_proc = dist_pt->communicator_pt()->nproc();

 //Number of entries to send to each processor and offsets into the packed
 //send buffer (the map is traversed in processor-rank order)
 Vector<int> send_n(n_proc,0);
 Vector<int> send_displacement(n_proc,0);
 int send_data_count=0;
 for(std::map<unsigned,Vector<unsigned> >::iterator it
      = to_be_haloed.begin();it!=to_be_haloed.end();++it) {
  const unsigned rank = it->first;
  const unsigned size_ = it->second.size();
  send_displacement[rank] = send_data_count;
  send_n[rank] = static_cast<int>(size_);
  send_data_count += size_;
 }

 //Exchange the counts; receive directly into the haloed storage
 Haloed_n.resize(n_proc,0);
 MPI_Alltoall(&send_n[0],1,MPI_INT,&Haloed_n[0],1,MPI_INT,
              dist_pt->communicator_pt()->mpi_comm());

 //Pack the requested local indices in processor-rank order
 //(the buffer always has size at least one so that &send_data[0] is valid)
 if(send_data_count==0) {++send_data_count;}
 Vector<unsigned> send_data(send_data_count);
 unsigned count=0;
 for(std::map<unsigned,Vector<unsigned> >::iterator it
      = to_be_haloed.begin();it!=to_be_haloed.end();++it) {
  for(Vector<unsigned>::iterator it2 = it->second.begin();
      it2 != it->second.end();++it2) {
   send_data[count] = (*it2);
   ++count;
  }
 }

 //Offsets of the haloed entries received from each processor
 int receive_data_count=0;
 Haloed_displacement.resize(n_proc);
 for(int d=0;d<n_proc;d++) {
  Haloed_displacement[d] = receive_data_count;
  receive_data_count += Haloed_n[d];
 }

 //Exchange the packed entries themselves
 if(receive_data_count==0) {++receive_data_count;}
 Haloed_eqns.resize(receive_data_count);
 MPI_Alltoallv(&send_data[0],&send_n[0],&send_displacement[0],MPI_UNSIGNED,
               &Haloed_eqns[0],&Haloed_n[0],&Haloed_displacement[0],
               MPI_UNSIGNED,dist_pt->communicator_pt()->mpi_comm());

 //Finally, translate the map of halo entries into the permanent storage
 Halo_n.resize(n_proc,0);
 Halo_displacement.resize(n_proc,0);
 unsigned receive_haloed_count=0;
 for(int d=0;d<n_proc;d++) {
  std::map<unsigned,Vector<unsigned> >::iterator it = halo_entries.find(d);
  //Nothing will be received from processor d if it has no map entry
  if(it==halo_entries.end()) {
   Halo_displacement[d] = receive_haloed_count;
   Halo_n[d] = 0;
  }
  else {
   Halo_displacement[d] = receive_haloed_count;
   const int size_ = it->second.size();
   Halo_n[d] = size_;
   Halo_eqns.resize(receive_haloed_count+size_);
   for(int i=0;i<size_;i++) {
    Halo_eqns[receive_haloed_count+i] = it->second[i];
   }
   receive_haloed_count += size_;
  }
 }
}
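Aside: the constructor above relies on a standard two-phase MPI exchange: the
counts go first via MPI_Alltoall, then the packed, variable-length payload via
MPI_Alltoallv with displacements built from a prefix sum of the counts. The
following self-contained sketch, which is independent of oomph-lib and whose
names and data are purely illustrative, shows the same pattern in isolation.

#include <mpi.h>
#include <algorithm>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
 MPI_Init(&argc,&argv);
 int rank=0; int n_proc=0;
 MPI_Comm_rank(MPI_COMM_WORLD,&rank);
 MPI_Comm_size(MPI_COMM_WORLD,&n_proc);

 //Phase 1: every process tells every other process how many entries it
 //will send (here simply rank+1 entries to each destination)
 std::vector<int> send_n(n_proc,rank+1);
 std::vector<int> recv_n(n_proc,0);
 MPI_Alltoall(&send_n[0],1,MPI_INT,&recv_n[0],1,MPI_INT,MPI_COMM_WORLD);

 //Displacements into the packed buffers are prefix sums of the counts
 std::vector<int> send_displ(n_proc,0);
 std::vector<int> recv_displ(n_proc,0);
 int send_total=0; int recv_total=0;
 for(int d=0;d<n_proc;d++) {
  send_displ[d]=send_total; send_total+=send_n[d];
  recv_displ[d]=recv_total; recv_total+=recv_n[d];
 }

 //Phase 2: exchange the packed, variable-length payload
 //(buffers are given size at least one so that &buffer[0] is valid)
 std::vector<unsigned> send_data(std::max(send_total,1),
                                 static_cast<unsigned>(rank));
 std::vector<unsigned> recv_data(std::max(recv_total,1),0);
 MPI_Alltoallv(&send_data[0],&send_n[0],&send_displ[0],MPI_UNSIGNED,
               &recv_data[0],&recv_n[0],&recv_displ[0],MPI_UNSIGNED,
               MPI_COMM_WORLD);

 std::printf("rank %d received %d entries in total\n",rank,recv_total);
 MPI_Finalize();
 return 0;
}

Built and run with, e.g., mpic++ and mpirun, each rank reports how many
entries it received; the constructor above follows the same steps but stores
the results in the Haloed_* and Halo_* members instead of temporary buffers.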
//============================================================================
/// Set up a vector of pointers to halo data, indexed using the scheme
/// in Local_index
//============================================================================
void DoubleVectorHaloScheme::setup_halo_dofs(
 const std::map<unsigned,double*> &halo_data_pt,
 Vector<double*> &halo_dof_pt)
{
 //One pointer for each entry in the translation map
 unsigned n_halo = Local_index.size();
 halo_dof_pt.resize(n_halo);

 //Loop over the entries in the translation map
 for(std::map<unsigned,unsigned>::iterator it=Local_index.begin();
     it!=Local_index.end();++it) {
  //Find the pointer to the data associated with the global equation
  std::map<unsigned,double*>::const_iterator it2 =
   halo_data_pt.find(it->first);
  if(it2!=halo_data_pt.end()) {
   halo_dof_pt[it->second] = it2->second;
  }
  else {
   std::ostringstream error_stream;
   error_stream << "Global equation " << it->first
                << " required as halo is not stored in halo_data_pt\n";
   throw OomphLibError(error_stream.str(),
                       OOMPH_CURRENT_FUNCTION,
                       OOMPH_EXCEPTION_LOCATION);
  }
 }
}
//============================================================================
/// Synchronise the halo data: copy the values of the haloed (master)
/// entries into the halo copies held on the other processors. Requires
/// one "all to all" communication.
//============================================================================
void DoubleVectorWithHaloEntries::synchronise()
{
 //Only need to do anything if the DoubleVector is distributed
 if(this->distributed()) {
  //Pack the haloed values for sending
  const unsigned n_send = Halo_scheme_pt->Haloed_eqns.size();
  Vector<double> send_data(n_send);
  for(unsigned i=0;i<n_send;i++) {
   send_data[i] = (*this)[Halo_scheme_pt->Haloed_eqns[i]];
  }

  //Storage for the incoming halo values
  const unsigned n_receive = Halo_scheme_pt->Halo_eqns.size();
  Vector<double> receive_data(n_receive);

  //Make sure the buffers have size at least one
  if(n_send==0) {send_data.resize(1);}
  if(n_receive==0) {receive_data.resize(1);}
  MPI_Alltoallv(&send_data[0],&Halo_scheme_pt->Haloed_n[0],
                &Halo_scheme_pt->Haloed_displacement[0],MPI_DOUBLE,
                &receive_data[0],&Halo_scheme_pt->Halo_n[0],
                &Halo_scheme_pt->Halo_displacement[0],MPI_DOUBLE,
                this->distribution_pt()->communicator_pt()->mpi_comm());

  //Update the local halo values
  for(unsigned i=0;i<n_receive;i++) {
   Halo_value[Halo_scheme_pt->Halo_eqns[i]] = receive_data[i];
  }
 }
}
//============================================================================
/// Send the halo values to the processors that hold the corresponding
/// haloed (master) entries and add them in there; a subsequent
/// synchronise() then copies the summed values back into the halo copies.
//============================================================================
void DoubleVectorWithHaloEntries::sum_all_halo_and_haloed_values()
{
 //Only need to do anything if the DoubleVector is distributed
 if(this->distributed()) {
  //Pack the halo values for sending
  const unsigned n_send = Halo_scheme_pt->Halo_eqns.size();
  Vector<double> send_data(n_send);
  for(unsigned i=0;i<n_send;i++) {
   send_data[i] = Halo_value[Halo_scheme_pt->Halo_eqns[i]];
  }

  //Storage for the incoming contributions to the haloed entries
  const unsigned n_receive = Halo_scheme_pt->Haloed_eqns.size();
  Vector<double> receive_data(n_receive);

  //Make sure the buffers have size at least one
  if(n_send==0) {send_data.resize(1);}
  if(n_receive==0) {receive_data.resize(1);}
  MPI_Alltoallv(&send_data[0],&Halo_scheme_pt->Halo_n[0],
                &Halo_scheme_pt->Halo_displacement[0],MPI_DOUBLE,
                &receive_data[0],&Halo_scheme_pt->Haloed_n[0],
                &Halo_scheme_pt->Haloed_displacement[0],MPI_DOUBLE,
                this->distribution_pt()->communicator_pt()->mpi_comm());

  //Add the received contributions to the haloed (master) entries
  for(unsigned i=0;i<n_receive;i++) {
   (*this)[Halo_scheme_pt->Haloed_eqns[i]] += receive_data[i];
  }
 }
}
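Worked illustration of the two communication helpers above: if the master
(haloed) copy of a global equation holds 1.0 and halo copies on two other
processors hold 0.5 and 0.25, then sum_all_halo_and_haloed_values() leaves
the master holding 1.75, and a subsequent synchronise() overwrites both halo
copies with 1.75 as well (the numbers are illustrative only).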
//============================================================================
/// Construct the halo scheme and storage for the halo data
//============================================================================
void DoubleVectorWithHaloEntries::build_halo_scheme(
 DoubleVectorHaloScheme* const &halo_scheme_pt)
{
 Halo_scheme_pt = halo_scheme_pt;

 if(Halo_scheme_pt!=0) {
  //Provide storage for each entry in the translation scheme
  unsigned n_halo_data = halo_scheme_pt->Local_index.size();
  Halo_value.resize(n_halo_data);
 }
}
Documentation for the members and functions referenced above:

void sum_all_halo_and_haloed_values()
 Sum the halo and haloed values into the haloed (master) entries.

OomphCommunicator* communicator_pt() const
 Const access to the communicator pointer.

Vector<int> Halo_n
 Storage for the number of entries to be received from each other processor.

unsigned first_row() const
 Access function for the first row on this processor; if the distribution is
 not distributed then this is just zero.

bool distributed() const
 Access function for the distributed flag: indicates whether the distribution
 is serial or distributed.

Vector<int> Halo_displacement
 Storage for the offsets of each processor's data in the receive buffer.

Vector<unsigned> Halo_eqns
 Storage for all the entries that are to be received from other processors,
 packed as (received_from_proc0, received_from_proc1, ..., received_from_procn).

LinearAlgebraDistribution
 Describes the distribution of a distributable linear algebra type object,
 typically a container such as a DoubleVector.

void build_halo_scheme(DoubleVectorHaloScheme* const &halo_scheme_pt)
 Construct the halo scheme and storage for the halo data.

Vector<int> Haloed_displacement
 Storage for the offsets of the haloed entries for each processor in the
 packed Haloed_eqns array.

unsigned rank_of_global_row(const unsigned i) const
 Return the processor rank of the global row number i.

Vector<unsigned> Haloed_eqns
 The haloed entries that will be sent, packed in a format compatible with
 MPI_Alltoallv, i.e. (send_to_proc0, send_to_proc1, ..., send_to_procn).

void synchronise()
 Synchronise the halo data.

Vector<int> Haloed_n
 Storage for the number of haloed entries to be sent to each processor.

void setup_halo_dofs(const std::map<unsigned,double*> &halo_data_pt,
                     Vector<double*> &halo_dof_pt)
 Set up a vector of pointers to halo data, indexed using the scheme in
 Local_index.

std::map<unsigned,unsigned> Local_index
 Storage for the translation scheme from global unknown to local index in the
 additional storage vector.

DoubleVectorHaloScheme(LinearAlgebraDistribution* const &dist_pt,
                       const Vector<unsigned> &required_global_eqn)
 Constructor that sets up the information required to communicate between all
 processors. Requires two "all to all" communications. Arguments are the
 distribution of the DoubleVector and a Vector of global unknowns required on
 this processor.
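To put the pieces together, here is a hedged usage sketch. The
DoubleVectorHaloScheme constructor and the build_halo_scheme() and
synchronise() calls follow the documentation above; the
LinearAlgebraDistribution and DoubleVectorWithHaloEntries constructors used
for the setup are assumptions based on the usual oomph-lib DoubleVector
interface, so treat this as an outline rather than a verified driver.

#include "generic.h"
using namespace oomph;

//Outline only: distribute 100 global rows over the processors attached to
//comm_pt, request two off-processor equations as halo data, and keep the
//halo copies up to date.
void halo_scheme_sketch(OomphCommunicator* comm_pt)
{
 //Distribution of 100 global rows (assumed constructor signature)
 LinearAlgebraDistribution dist(comm_pt,100,true);

 //A vector with additional halo storage (assumed constructor signature)
 DoubleVectorWithHaloEntries v(&dist,0.0);

 //Global equations needed on this processor but (possibly) stored elsewhere
 Vector<unsigned> required_global_eqn;
 required_global_eqn.push_back(0);
 required_global_eqn.push_back(99);

 //Set up the communication pattern once (two all-to-all communications)...
 DoubleVectorHaloScheme halo_scheme(&dist,required_global_eqn);

 //...attach it to the vector, which allocates the halo storage...
 v.build_halo_scheme(&halo_scheme);

 //...and copy the master values into the halo copies whenever required.
 v.synchronise();
}

The idea is that the comparatively expensive all-to-all setup happens once
per distribution, while synchronise() and sum_all_halo_and_haloed_values()
are the only calls needed each time the vector's values change.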