//LIC// ====================================================================
//LIC// This file forms part of oomph-lib, the object-oriented,
//LIC// multi-physics finite-element library, available
//LIC// at http://www.oomph-lib.org.
//LIC//
//LIC// Version 1.0; svn revision $LastChangedRevision$
//LIC//
//LIC// $LastChangedDate$
//LIC//
//LIC// Copyright (C) 2006-2016 Matthias Heil and Andrew Hazel
//LIC//
//LIC// This library is free software; you can redistribute it and/or
//LIC// modify it under the terms of the GNU Lesser General Public
//LIC// License as published by the Free Software Foundation; either
//LIC// version 2.1 of the License, or (at your option) any later version.
//LIC//
//LIC// This library is distributed in the hope that it will be useful,
//LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of
//LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
//LIC// Lesser General Public License for more details.
//LIC//
//LIC// You should have received a copy of the GNU Lesser General Public
//LIC// License along with this library; if not, write to the Free Software
//LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
//LIC// 02110-1301 USA.
//LIC//
//LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk.
//LIC//
//LIC//====================================================================
// Templated refineable mesh functions

// Include guards to prevent multiple inclusion of the header
#ifndef OOMPH_REFINEABLE_MESH_TEMPLATE_CC
#define OOMPH_REFINEABLE_MESH_TEMPLATE_CC

// Config header generated by autoconfig
#ifdef HAVE_CONFIG_H
#include <oomph-lib-config.h>
#endif

// oomph-lib headers
#include "refineable_mesh.h"
#include "missing_masters.h"
#include "missing_masters.template.cc"

namespace oomph
{

#ifdef OOMPH_HAS_MPI

 //========================================================================
 /// Additional actions required to synchronise halo nodes where master
 /// nodes could not be found during synchronise_hanging_nodes().
 /// Overloaded from the Mesh class to take care of master nodes on
 /// the outer edge of the halo layer which do not exist on that
 /// processor. This fixes problems with the synchronisation of
 /// hanging nodes for elements with non-uniformly spaced nodes.
 //========================================================================
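 // Overview of the algorithm implemented below (a summary of the steps
 // carried out in this function):
 //  1) Exchange the flat-packed hanging status of every halo/haloed node
 //     pair with every other processor.
 //  2) Compare the two versions and record the haloed nodes whose halo
 //     counterparts disagree (the haloed version wins).
 //  3) For each flagged node, send the data required to reconstruct its
 //     missing master nodes; the receiving processor rebuilds those
 //     masters in its external halo storage.
 //  4) Add the newly created external halo/haloed nodes to the shared
 //     node scheme so that the standard synchronisation machinery can
 //     deal with them.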
 template<class ELEMENT>
 void TreeBasedRefineableMesh<ELEMENT>::
 additional_synchronise_hanging_nodes(const unsigned& ncont_interpolated_values)
 {
  // Check if additional synchronisation of hanging nodes is disabled
  if(is_additional_synchronisation_of_hanging_nodes_disabled()==true)
   {
    return;
   }

  // This provides all the node-adding helper functions required to
  // reconstruct the missing halo master nodes on this processor
  using namespace Missing_masters_functions;
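  // (Specifically, the send side below flat-packs master-node data with
  //  recursively_add_masters_of_external_haloed_node(...), and the
  //  receive side rebuilds the masters from that data with
  //  recursively_add_masters_of_external_halo_node_to_storage<ELEMENT>(...).)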


  double t_start = 0.0;
  double t_end = 0.0;
  if (Global_timings::Doc_comprehensive_timings)
   {
    t_start=TimingHelpers::timer();
   }

  // Store number of processors and current process
  MPI_Status status;
  int n_proc=Comm_pt->nproc();
  int my_rank=Comm_pt->my_rank();


#ifdef PARANOID
  // Paranoid check to make sure nothing else is using the
  // external storage. This will need to be changed at some
  // point if we are to use non-uniformly spaced nodes in
  // multi-domain problems.
  bool err=false;
  // Print out external storage
  for(int d=0; d<n_proc; d++)
   {
    if(d!=my_rank)
     {
      // Check to see if external storage is being used by anybody else
      if(nexternal_haloed_node(d)!=0)
       {
        err=true;
        oomph_info << "Processor " << my_rank
                   << "'s external haloed nodes with processor "
                   << d << " are:" << std::endl;
        for(unsigned i=0; i<nexternal_haloed_node(d); i++)
         {
          oomph_info << "external_haloed_node_pt("<<d<<","<<i<<") = "
                     << external_haloed_node_pt(d,i) << std::endl;
          oomph_info << "x = ( " << external_haloed_node_pt(d,i)->x(0)
                     << " , " << external_haloed_node_pt(d,i)->x(1)
                     << " )" << std::endl;
         }
       }
     }
   }
  for(int d=0; d<n_proc; d++)
   {
    if(d!=my_rank)
     {
      // Check to see if external storage is being used by anybody else
      if(nexternal_halo_node(d)!=0)
       {
        err=true;
        oomph_info << "Processor " << my_rank
                   << "'s external halo nodes with processor "
                   << d << " are:" << std::endl;
        for(unsigned i=0; i<nexternal_halo_node(d); i++)
         {
          oomph_info << "external_halo_node_pt("<<d<<","<<i<<") = "
                     << external_halo_node_pt(d,i) << std::endl;
          oomph_info << "x = ( " << external_halo_node_pt(d,i)->x(0)
                     << " , " << external_halo_node_pt(d,i)->x(1)
                     << " )" << std::endl;
         }
       }
     }
   }
  if(err)
   {
    std::ostringstream err_stream;
    err_stream << "There are already some nodes in the external storage"
               << std::endl
               << "for this mesh. This bit assumes that nothing else"
               << std::endl
               << "uses this storage (for now).";
    throw OomphLibError(err_stream.str(),
                        OOMPH_CURRENT_FUNCTION,
                        OOMPH_EXCEPTION_LOCATION);
   }
#endif



  // Compare the halo and haloed nodes for discrepancies in hanging status

  // Storage for the hanging status of halo/haloed nodes on elements
  Vector<Vector<int> > haloed_hanging(n_proc);
  Vector<Vector<int> > halo_hanging(n_proc);
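
  // (Layout of these flat-packed vectors: for every halo(ed) node shared
  //  with processor d we store ncont_interpolated_values+1 consecutive
  //  entries, one per value of icont used below, where icont=-1 denotes
  //  the geometric hanging status and icont=0,...,ncont_interpolated_values-1
  //  the hanging status in each continuously interpolated value. Each
  //  entry is the number of master nodes if the node hangs in that
  //  variable and zero otherwise. E.g. with one interpolated value and
  //  two nodes, [2,2, 0,0] means node 0 hangs geometrically and in value
  //  0 with two masters, while node 1 does not hang at all.)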

  // Storage for the haloed nodes with discrepancies in their hanging
  // status with each processor
  Vector<std::map<Node*,unsigned> >
   haloed_hanging_node_with_discrepancy_pt(n_proc);

  if (Global_timings::Doc_comprehensive_timings)
   {
    t_start = TimingHelpers::timer();
   }

  // Store number of continuously interpolated values as int
  int ncont_inter_values=ncont_interpolated_values;

  // Loop over processes: Each processor checks that its haloed nodes
  // with proc d have a consistent hanging status with their halo
  // counterparts.
  for (int d=0; d<n_proc; d++)
   {
    // No halo with self: Set up hanging info for my haloed nodes with
    // proc d, then get ready to receive halo info from processor d.
    if (d!=my_rank)
     {
      // Loop over haloed nodes
      unsigned nh=nhaloed_node(d);
      for (unsigned j=0;j<nh;j++)
       {
        // Get node
        Node* nod_pt=haloed_node_pt(d,j);

        // Loop over the hanging status for each interpolated variable
        // (and the geometry)
        for (int icont=-1; icont<ncont_inter_values; icont++)
         {
          // Store the hanging status of this haloed node
          if (nod_pt->is_hanging(icont))
           {
            unsigned n_master=nod_pt->hanging_pt(icont)->nmaster();
            haloed_hanging[d].push_back(n_master);
           }
          else
           {
            haloed_hanging[d].push_back(0);
           }
         }
       }

      // Receive the hanging status information from the corresponding
      // process
      unsigned count_haloed=haloed_hanging[d].size();

#ifdef PARANOID
      // Check that the numbers of halo and haloed data match
      unsigned tmp=0;
      MPI_Recv(&tmp,1,MPI_UNSIGNED,d,0,Comm_pt->mpi_comm(),&status);
      if (tmp!=count_haloed)
       {
        std::ostringstream error_stream;
        error_stream << "Number of halo data, " << tmp
                     << ", does not match number of haloed data, "
                     << count_haloed << std::endl;
        throw OomphLibError(error_stream.str(),
                            OOMPH_CURRENT_FUNCTION,
                            OOMPH_EXCEPTION_LOCATION);
       }
#endif

      // Get the data (if any)
      if (count_haloed!=0)
       {
        halo_hanging[d].resize(count_haloed);
        MPI_Recv(&halo_hanging[d][0],count_haloed,MPI_INT,d,0,
                 Comm_pt->mpi_comm(),&status);
       }
     }
    else // d==my_rank, i.e. current process: Send halo hanging status
         // to process dd where it's received (see above) and compared
         // against the hanging status of the haloed nodes
     {
      for (int dd=0; dd<n_proc; dd++)
       {
        // No halo with yourself
        if (dd!=d)
         {
          // Storage for halo hanging status and counter
          Vector<int> local_halo_hanging;

          // Loop over halo nodes
          unsigned nh=nhalo_node(dd);
          for (unsigned j=0;j<nh;j++)
           {
            // Get node
            Node* nod_pt=halo_node_pt(dd,j);

            // Loop over the hanging status for each interpolated
            // variable (and the geometry)
            for (int icont=-1; icont<ncont_inter_values; icont++)
             {
              // Store hanging status of halo node
              if (nod_pt->is_hanging(icont))
               {
                unsigned n_master=nod_pt->hanging_pt(icont)->nmaster();
                local_halo_hanging.push_back(n_master);
               }
              else
               {
                local_halo_hanging.push_back(0);
               }
             }
           }


          // Send the information to the relevant process
          unsigned count_halo=local_halo_hanging.size();

#ifdef PARANOID
          // Check that the numbers of halo and haloed data match
          MPI_Send(&count_halo,1,MPI_UNSIGNED,dd,0,Comm_pt->mpi_comm());
#endif

          // Send data (if any)
          if (count_halo!=0)
           {
            MPI_Send(&local_halo_hanging[0],count_halo,MPI_INT,
                     dd,0,Comm_pt->mpi_comm());
           }
         }
       }
     }
   }

  if (Global_timings::Doc_comprehensive_timings)
   {
    t_end = TimingHelpers::timer();
    oomph_info << "Time for first all-to-all in "
               << "additional_synchronise_hanging_nodes(): "
               << t_end-t_start << std::endl;
    t_start = TimingHelpers::timer();
   }


  // Now compare equivalent halo and haloed vectors to find discrepancies.
  // It is possible that a master node may not be on either process
  // involved in the halo-haloed scheme; to work around this, we use the
  // shared_node storage scheme, which stores all nodes that are on each
  // pair of processors in the same order on each of the two processors
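
  // (Illustrative example: suppose a haloed node on this processor hangs
  //  with masters {m1,m2,m3}, but its halo counterpart on processor d was
  //  left non-hanging because m3 lies just outside d's halo layer. The
  //  comparison below flags the node, and m3 is later reconstructed on d
  //  as an external halo node.)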


  // Loop over domains: Each processor checks the consistency of the
  // hanging status of its haloed nodes with proc d against their halo
  // counterparts. Haloed wins if there are any discrepancies.
  for (int d=0; d<n_proc; d++)
   {
    // No halo with yourself
    if (d!=my_rank)
     {
      // Counter for traversing haloed data
      unsigned count=0;

      // Loop over haloed nodes
      unsigned nh=nhaloed_node(d);
      for (unsigned j=0;j<nh;j++)
       {
        // Get node
        Node* nod_pt=haloed_node_pt(d,j);

        // Loop over the hanging status for each interpolated variable
        // (and the geometry)
        for (int icont=-1; icont<ncont_inter_values; icont++)
         {
          // Compare hanging status of halo/haloed counterpart structure

          // The haloed node is hanging and its halo counterpart has a
          // different number of master nodes (including none, in which
          // case the halo counterpart isn't hanging at all)
          if ((haloed_hanging[d][count]>0)&&
              (haloed_hanging[d][count]!=halo_hanging[d][count]))
           {
            // Store this node so it can be synchronised later
            haloed_hanging_node_with_discrepancy_pt[d].insert(
             std::pair<Node*,unsigned>(nod_pt,d));
           }
          // Increment counter for number of haloed data
          count++;
         } // end of loop over icont
       } // end of loop over haloed nodes
     }
   } // end loop over all processors



  // Populate external halo(ed) node storage with master nodes of
  // halo(ed) nodes

  // Loop over domains: Each processor packs up the data required to
  // reconstruct the missing masters of its flagged haloed nodes with
  // proc d and sends it across; processor d then rebuilds those
  // masters in its external halo storage.
  for (int d=0; d<n_proc; d++)
   {
    // No halo with yourself
    if (d!=my_rank)
     {
      // Now add haloed master nodes to external storage
      //===============================================

      // Storage for data to be sent
      Vector<unsigned> send_unsigneds(0);
      Vector<double> send_doubles(0);
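
      // (Message layout, summarised from the matching sends/receives in
      //  this function -- each exchange with processor d uses up to six
      //  point-to-point messages, distinguished by tag:
      //   tag 0: number of flat-packed unsigneds
      //   tag 1: number of flat-packed doubles
      //   tag 2: number of haloed nonmaster nodes processed
      //   tag 3: indices of those nodes in the shared node storage
      //   tag 4: the flat-packed unsigned data
      //   tag 5: the flat-packed double data)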

      // Count number of haloed nonmaster nodes for halo process
      unsigned nhaloed_nonmaster_nodes_processed = 0;
      Vector<unsigned> haloed_nonmaster_node_index(0);

      // Loop over hanging haloed nodes with discrepancies
      std::map<Node*,unsigned>::iterator j;
      for(j=haloed_hanging_node_with_discrepancy_pt[d].begin();
          j!=haloed_hanging_node_with_discrepancy_pt[d].end(); j++)
       {
        Node* nod_pt = (*j).first;
        // Find the index of this haloed node in the halo storage of
        // processor d (but look in the shared node storage in case it is
        // actually haloed on another processor which we don't know about)
        std::vector<Node*>::iterator it
         = std::find(Shared_node_pt[d].begin(),
                     Shared_node_pt[d].end(),
                     nod_pt);
        if(it != Shared_node_pt[d].end())
         {
          // Tell the other processor to create this node
          //send_unsigneds.push_back(1);
          nhaloed_nonmaster_nodes_processed++;

          // Tell the other processor where to find this node in its
          // halo node storage
          unsigned index = it - Shared_node_pt[d].begin();
          haloed_nonmaster_node_index.push_back(index);

          // Tell this processor that this node is really a haloed node.
          // This also packages up the data which needs to be sent to the
          // processor on which the halo equivalent node lives
          recursively_add_masters_of_external_haloed_node
           (d, nod_pt, this, ncont_inter_values,
            send_unsigneds, send_doubles);
         }
        else
         {
          throw OomphLibError(
           "Haloed node not found in haloed node storage",
           OOMPH_CURRENT_FUNCTION,
           OOMPH_EXCEPTION_LOCATION);
         }
       }

      // How much data needs to be sent?
      unsigned send_unsigneds_count = send_unsigneds.size();
      unsigned send_doubles_count = send_doubles.size();

      // Send amount of data
      MPI_Send(&send_unsigneds_count,1,MPI_UNSIGNED,d,0,Comm_pt->mpi_comm());
      MPI_Send(&send_doubles_count,1,MPI_UNSIGNED,d,1,Comm_pt->mpi_comm());

      // Send to halo process the number of haloed nodes we processed
      MPI_Send(&nhaloed_nonmaster_nodes_processed,1,MPI_UNSIGNED,d,2,
               Comm_pt->mpi_comm());
      if(nhaloed_nonmaster_nodes_processed>0)
       {
        MPI_Send(&haloed_nonmaster_node_index[0],
                 nhaloed_nonmaster_nodes_processed,MPI_UNSIGNED,d,3,
                 Comm_pt->mpi_comm());
       }

      // Send data about external halo nodes
      if(send_unsigneds_count>0)
       {
        // Only send if there is anything to send
        MPI_Send(&send_unsigneds[0],send_unsigneds_count,MPI_UNSIGNED,d,4,
                 Comm_pt->mpi_comm());
       }
      if(send_doubles_count>0)
       {
        // Only send if there is anything to send
        MPI_Send(&send_doubles[0],send_doubles_count,MPI_DOUBLE,d,5,
                 Comm_pt->mpi_comm());
       }

     }
    else // (d==my_rank), current process
     {
      // Now construct and add halo versions of master nodes to external
      // storage
      //====================================================================

      // Loop over processors to get data
      for(int dd=0; dd<n_proc; dd++)
       {
        // Don't talk to yourself
        if(dd!=d)
         {
          // How much data is to be received?
          unsigned nrecv_unsigneds = 0;
          unsigned nrecv_doubles = 0;
          MPI_Recv(&nrecv_unsigneds,1,MPI_UNSIGNED,dd,0,
                   Comm_pt->mpi_comm(),&status);
          MPI_Recv(&nrecv_doubles,1,MPI_UNSIGNED,dd,1,
                   Comm_pt->mpi_comm(),&status);

          // Get from haloed process the number of halo nodes we need to
          // process
          unsigned nhalo_nonmaster_nodes_to_process = 0;
          MPI_Recv(&nhalo_nonmaster_nodes_to_process,1,MPI_UNSIGNED,dd,2,
                   Comm_pt->mpi_comm(),&status);
          Vector<unsigned> halo_nonmaster_node_index(
           nhalo_nonmaster_nodes_to_process);
          if (nhalo_nonmaster_nodes_to_process!=0)
           {
            MPI_Recv(&halo_nonmaster_node_index[0],
                     nhalo_nonmaster_nodes_to_process,MPI_UNSIGNED,dd,3,
                     Comm_pt->mpi_comm(),&status);
           }

          // Storage for data to be received
          Vector<unsigned> recv_unsigneds(nrecv_unsigneds);
          Vector<double> recv_doubles(nrecv_doubles);

          // Receive data about external haloed equivalent nodes
          if(nrecv_unsigneds>0)
           {
            // Only receive if there is anything to receive
            MPI_Recv(&recv_unsigneds[0],nrecv_unsigneds,MPI_UNSIGNED,dd,4,
                     Comm_pt->mpi_comm(),&status);
           }
          if(nrecv_doubles>0)
           {
            // Only receive if there is anything to receive
            MPI_Recv(&recv_doubles[0],nrecv_doubles,MPI_DOUBLE,dd,5,
                     Comm_pt->mpi_comm(),&status);
           }

          // Counters for the flat-packed data
          unsigned recv_unsigneds_count = 0;
          unsigned recv_doubles_count = 0;

          // Loop over halo nodes with discrepancies in their hanging
          // status
          for(unsigned j=0; j<nhalo_nonmaster_nodes_to_process; j++)
           {
            // Get pointer to halo nonmaster node which needs processing
            // (the given index is its index in the shared storage)
            Node* nod_pt = shared_node_pt(dd,halo_nonmaster_node_index[j]);

#ifdef PARANOID
            // Check if we have a MacroElementNodeUpdateNode
            if(dynamic_cast<MacroElementNodeUpdateNode*>(nod_pt))
             {
              // BENFLAG: The construction of missing master nodes for
              //          MacroElementNodeUpdateNodes does not work as
              //          expected. They require
              //          MacroElementNodeUpdateElements to be created for
              //          the missing halo nodes which will be added. It
              //          behaves as expected until duplicate nodes are
              //          pruned at the problem level.
              std::ostringstream err_stream;
              err_stream << "This currently doesn't work for"
                         << std::endl
                         << "MacroElementNodeUpdateNodes because these require"
                         << std::endl
                         << "MacroElementNodeUpdateElements to be created for"
                         << std::endl
                         << "the missing halo nodes which will be added"
                         << std::endl;
              throw OomphLibError(err_stream.str(),
                                  OOMPH_CURRENT_FUNCTION,
                                  OOMPH_EXCEPTION_LOCATION);
              //OomphLibWarning(err_stream.str(),
              //                OOMPH_CURRENT_FUNCTION,
              //                OOMPH_EXCEPTION_LOCATION);
             }
#endif

            // Construct a copy of the node and add it to the external
            // halo node storage
            unsigned loc_p = (unsigned) dd;
            unsigned node_index;
            recursively_add_masters_of_external_halo_node_to_storage<ELEMENT>
             (nod_pt, this, loc_p, node_index, ncont_inter_values,
              recv_unsigneds_count, recv_unsigneds,
              recv_doubles_count, recv_doubles);
           }

         } // end of dd!=d
       } // end of second loop over all processors
     }
   } // end loop over all processors


  if (Global_timings::Doc_comprehensive_timings)
   {
    t_end = TimingHelpers::timer();
    oomph_info
     << "Time for second all-to-all in "
     << "additional_synchronise_hanging_nodes(): "
     << t_end-t_start << std::endl;
    t_start = TimingHelpers::timer();
   }

  // Populate external halo(ed) node storage with master nodes of halo(ed)
  // nodes [end]

  // Count how many external halo/haloed nodes are added
  unsigned external_halo_count=0;
  unsigned external_haloed_count=0;

  // Flag to test whether we attempt to add any duplicate haloed nodes to
  // the shared storage -- if this is the case then we have duplicate halo
  // nodes on another processor but with different pointers and the shared
  // scheme will not be set up correctly
  bool duplicate_haloed_node_exists = false;

  // Loop over all the processors and add the shared nodes
  for (int d=0;d<n_proc;d++)
   {

    // Map of bools for whether the (external) node has been shared,
    // initialised to 0 (false) for each domain d
    std::map<Node*,bool> node_shared;

    // For all domains lower than the current domain: Do halos first
    // then haloed, to ensure correct order in lookup scheme from
    // the other side
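    // (For example, with my_rank==2 and d==1 this processor adds
    //  [external halo nodes, then external haloed nodes] with domain 1,
    //  while processor 1, for which d==2>my_rank, adds
    //  [external haloed nodes, then external halo nodes] -- so entry j
    //  in each side's shared-node lookup scheme refers to the same node
    //  on both processors.)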
    if (d<my_rank)
     {
      // Do external halo nodes
      unsigned nexternal_halo_nod=nexternal_halo_node(d);
      for (unsigned j=0;j<nexternal_halo_nod;j++)
       {
        Node* nod_pt=external_halo_node_pt(d,j);

        // Add it as a shared node from current domain
        if (!node_shared[nod_pt])
         {
          this->add_shared_node_pt(d,nod_pt);
          node_shared[nod_pt]=true;
          external_halo_count++;
         }

       } // end loop over nodes

      // Do external haloed nodes
      unsigned nexternal_haloed_nod=nexternal_haloed_node(d);
      for (unsigned j=0;j<nexternal_haloed_nod;j++)
       {
        Node* nod_pt=external_haloed_node_pt(d,j);

        // Add it as a shared node from current domain
        if (!node_shared[nod_pt])
         {
          this->add_shared_node_pt(d,nod_pt);
          node_shared[nod_pt]=true;
          external_haloed_count++;
         }
        else
         {
          duplicate_haloed_node_exists = true;
         }

       } // end loop over nodes

     }

    // If the domain is bigger than the current rank: Do haloed first
    // then halo, to ensure correct order in lookup scheme from
    // the other side
    if (d>my_rank)
     {
      // Do external haloed nodes
      unsigned nexternal_haloed_nod=nexternal_haloed_node(d);
      for (unsigned j=0;j<nexternal_haloed_nod;j++)
       {
        Node* nod_pt=external_haloed_node_pt(d,j);

        // Add it as a shared node from current domain
        if (!node_shared[nod_pt])
         {
          this->add_shared_node_pt(d,nod_pt);
          node_shared[nod_pt]=true;
          external_haloed_count++;
         }
        else
         {
          duplicate_haloed_node_exists = true;
         }

       } // end loop over nodes

      // Do external halo nodes
      unsigned nexternal_halo_nod=nexternal_halo_node(d);
      for (unsigned j=0;j<nexternal_halo_nod;j++)
       {
        Node* nod_pt=external_halo_node_pt(d,j);

        // Add it as a shared node from current domain
        if (!node_shared[nod_pt])
         {
          this->add_shared_node_pt(d,nod_pt);
          node_shared[nod_pt]=true;
          external_halo_count++;
         }

       } // end loop over nodes

     } // end if (d ...)

   } // end loop over processes


  // Say how many external halo/haloed nodes were added
  oomph_info << "INFO: " << external_halo_count
             << " external halo nodes and"
             << std::endl;
  oomph_info << "INFO: " << external_haloed_count
             << " external haloed nodes were added to the shared node scheme"
             << std::endl;

  // If we added duplicate haloed nodes, throw an error
  if(duplicate_haloed_node_exists)
   {
    // This problem should now be avoided because we are using existing
    // communication methods to locate nodes in this case. The error used
    // to arise as follows:
    //// Let my_rank==A. If this has happened then it means that
    //// duplicate haloed nodes exist on another processor (B). This
    //// problem arises if a master of a haloed node with a discrepancy
    //// is haloed with a different processor (C). A copy is constructed
    //// in the external halo storage on processor (B) because that node
    //// is not found in the (internal) haloed storage on (A) with (B)
    //// but that node already exists on processor (B) in the (internal)
    //// halo storage with processor (C). Thus two copies of this master
    //// node now exist on processor (B).

    std::ostringstream err_stream;
    err_stream << "Duplicate halo nodes exist on another processor!"
               << std::endl
               << "(See source code for more detailed explanation)"
               << std::endl;

    throw OomphLibError(err_stream.str(),
                        OOMPH_CURRENT_FUNCTION,
                        OOMPH_EXCEPTION_LOCATION);
   }


  if (Global_timings::Doc_comprehensive_timings)
   {
    t_end = TimingHelpers::timer();
    oomph_info
     << "Time for identification of shared nodes in "
     << "additional_synchronise_hanging_nodes(): "
     << t_end-t_start << std::endl;
   }

 }

#endif

}

#endif