37 template<
typename MATRIX>
62 if (this->is_master_block_preconditioner())
64 std::ostringstream err_msg;
68 err_msg <<
"No meshes have been set for this block preconditioner!\n" 69 <<
"Set one with set_nmesh(...), set_mesh(...)" << std::endl;
71 OOMPH_CURRENT_FUNCTION,
72 OOMPH_EXCEPTION_LOCATION);
73 for (
unsigned m=0;m<n;m++)
77 err_msg <<
"The mesh pointer to mesh " << m <<
" is null!\n" 78 <<
"Set a non-null one with set_mesh(...)" << std::endl;
80 OOMPH_CURRENT_FUNCTION,
81 OOMPH_EXCEPTION_LOCATION);
92 if(is_subsidiary_block_preconditioner())
96 unsigned para_doftype_in_master_preconditioner_coarse_size
97 = Doftype_in_master_preconditioner_coarse.size();
103 if(para_doftype_in_master_preconditioner_coarse_size == 0)
105 std::ostringstream err_msg;
106 err_msg <<
"The mapping from the dof types of the master " 107 <<
"block preconditioner \n" 108 <<
"to the subsidiary block preconditioner is empty.\n" 109 <<
"Doftype_in_master_preconditioner_coarse.size() == 0 \n" 110 <<
"has turn_into_subsidiary_block_preconditioner(...)\n" 111 <<
"been called with the correct parameters?\n" 114 OOMPH_CURRENT_FUNCTION,
115 OOMPH_EXCEPTION_LOCATION);
159 std::set<unsigned> doftype_map_set;
163 unsigned para_doftype_coarsen_map_coarse_size
164 = Doftype_coarsen_map_coarse.size();
172 for (
unsigned i = 0;
i < para_doftype_coarsen_map_coarse_size;
i++)
175 unsigned para_doftype_coarsen_map_coarse_i_size
176 = Doftype_coarsen_map_coarse[
i].size();
177 for (
unsigned j = 0; j < para_doftype_coarsen_map_coarse_i_size; j++)
180 std::pair<std::set<unsigned>::iterator,
bool> doftype_map_ret
181 = doftype_map_set.insert(Doftype_coarsen_map_coarse[
i][j]);
183 if(!doftype_map_ret.second)
185 std::ostringstream err_msg;
186 err_msg <<
"Error: the doftype number " 187 << Doftype_coarsen_map_coarse[
i][j]
188 <<
" is already inserted." 191 OOMPH_CURRENT_FUNCTION,
192 OOMPH_EXCEPTION_LOCATION);
201 if(para_doftype_in_master_preconditioner_coarse_size
202 != doftype_map_set.size())
204 std::ostringstream err_msg;
205 err_msg <<
"The size of doftype_in_master_preconditioner_coarse " 206 <<
"must be the same as the total\n" 207 <<
"number of values in the doftype_coarsen_map_coarse vector." 210 OOMPH_CURRENT_FUNCTION,
211 OOMPH_EXCEPTION_LOCATION);
217 unsigned para_doftype_in_master_preconditioner_coarse_size_minus_one
218 = para_doftype_in_master_preconditioner_coarse_size - 1;
219 if(para_doftype_in_master_preconditioner_coarse_size_minus_one
220 != *doftype_map_set.rbegin())
222 std::ostringstream err_msg;
223 err_msg <<
"The maximum dof type number in the " 224 <<
"doftype_coarsen_map vector must be " 225 << para_doftype_in_master_preconditioner_coarse_size_minus_one
228 OOMPH_CURRENT_FUNCTION,
229 OOMPH_EXCEPTION_LOCATION);
254 Doftype_in_master_preconditioner_fine.resize(0);
255 Doftype_coarsen_map_fine.resize(0);
286 if(master_block_preconditioner_pt()->doftype_coarsen_map_fine().size() == 0)
288 std::ostringstream err_msg;
289 err_msg <<
"The master block preconditioner's " 290 <<
"Doftype_coarsen_map_fine is not\n" 291 <<
"set up properly.\n" 293 <<
"This vector is constructed in the function " 294 <<
"block_setup(...).\n" 297 OOMPH_CURRENT_FUNCTION,
298 OOMPH_EXCEPTION_LOCATION);
302 unsigned doftype_in_master_preconditioner_coarse_size
303 = Doftype_in_master_preconditioner_coarse.size();
305 i < doftype_in_master_preconditioner_coarse_size;
i++)
308 unsigned subvec_index = Doftype_in_master_preconditioner_coarse[
i];
313 = Master_block_preconditioner_pt
314 ->get_fine_grain_dof_types_in(subvec_index);
316 Doftype_in_master_preconditioner_fine.insert(
317 Doftype_in_master_preconditioner_fine.end(),
318 tmp_master_dof_subvec.begin(),
319 tmp_master_dof_subvec.end());
393 unsigned dof_type_index = 0;
395 i < doftype_in_master_preconditioner_coarse_size;
i++)
399 unsigned coarse_dof = Doftype_in_master_preconditioner_coarse[
i];
401 unsigned n_master_fine_doftypes
402 = Master_block_preconditioner_pt->nfine_grain_dof_types_in(coarse_dof);
405 for (
unsigned j = 0; j < n_master_fine_doftypes; j++)
407 tmp_sub_vec.push_back(dof_type_index);
410 master_fine_doftype_translated.push_back(tmp_sub_vec);
419 unsigned doftype_coarsen_map_coarse_size
420 = Doftype_coarsen_map_coarse.size();
421 for (
unsigned i = 0;
i < doftype_coarsen_map_coarse_size;
i++)
424 unsigned doftype_coarsen_map_coarse_i_size
425 = Doftype_coarsen_map_coarse[
i].size();
426 for (
unsigned j = 0; j < doftype_coarsen_map_coarse_i_size; j++)
428 unsigned subvec_i = Doftype_coarsen_map_coarse[
i][j];
430 tmp_vec.insert(tmp_vec.end(),
431 master_fine_doftype_translated[subvec_i].begin(),
432 master_fine_doftype_translated[subvec_i].end());
435 Doftype_coarsen_map_fine.push_back(tmp_vec);
440 Internal_ndof_types = Doftype_in_master_preconditioner_fine.size();
443 Internal_nblock_types = Internal_ndof_types;
448 for (
unsigned b = 0; b < Internal_ndof_types; b++)
450 Nrow += this->internal_dof_block_dimension(b);
456 std::ostringstream error_message;
458 <<
"Nrow=0 in subsidiary preconditioner. This seems fishy and\n" 459 <<
"suggests that block_setup() was not called for the \n" 460 <<
"master block preconditioner yet.";
462 OOMPH_CURRENT_FUNCTION,
463 OOMPH_EXCEPTION_LOCATION);
486 if(is_master_block_preconditioner())
489 unsigned n_external_dof_types = dof_to_block_map.size();
497 unsigned n_internal_dof_types = internal_ndof_types();
499 if (n_internal_dof_types != n_external_dof_types)
501 std::ostringstream err_msg;
503 <<
"The internal ndof types and the length of the dof_to_block_map\n" 504 <<
"vector is not the same. Since this is the master block " 505 <<
"preconditioner,\n" 506 <<
"you must describe which block each DOF type belongs to,\n" 507 <<
"no more, no less." 508 <<
"internal_ndof_types = " << n_internal_dof_types <<
"\n" 509 <<
"dof_to_block_map.size() = " << n_external_dof_types <<
"\n";
511 OOMPH_CURRENT_FUNCTION,
512 OOMPH_EXCEPTION_LOCATION);
517 Doftype_coarsen_map_fine.clear();
518 Doftype_coarsen_map_coarse.clear();
519 Doftype_coarsen_map_fine.reserve(n_external_dof_types);
520 Doftype_coarsen_map_coarse.reserve(n_external_dof_types);
523 for (
unsigned i = 0;
i < n_external_dof_types;
i++)
527 Doftype_coarsen_map_fine.push_back(tmp_vec);
528 Doftype_coarsen_map_coarse.push_back(tmp_vec);
538 if( (Doftype_coarsen_map_fine.size() == 0)
539 ||(Doftype_coarsen_map_coarse.size() == 0))
541 std::ostringstream err_msg;
543 <<
"Either the Doftype_coarsen_map_fine or the \n" 544 <<
"Doftype_coarsen_map_coarse vectors is of size 0.\n" 545 <<
"Did you remember to call the function " 546 <<
"turn_into_subsidiary_block_preconditioner(...)?";
548 OOMPH_CURRENT_FUNCTION,
549 OOMPH_EXCEPTION_LOCATION);
599 unsigned dof_to_block_map_size = dof_to_block_map.size();
602 if(dof_to_block_map_size != Doftype_coarsen_map_coarse.size())
604 std::ostringstream err_msg;
606 <<
"The size of dof_to_block_map and Doftype_coarsen_map_coarse is not " 608 <<
"dof_to_block_map.size() = " << dof_to_block_map_size <<
"\n" 609 <<
"Doftype_coarsen_map_coarse.size() = " 610 << Doftype_coarsen_map_coarse.size() <<
".\n" 611 <<
"One of the two list is incorrect, please look at the comments\n" 612 <<
"in the source code for more details.";
614 OOMPH_CURRENT_FUNCTION,
615 OOMPH_EXCEPTION_LOCATION);
623 unsigned max_block_number = *std::max_element(dof_to_block_map.begin(),
624 dof_to_block_map.end());
642 Block_to_dof_map_coarse.clear();
644 const unsigned tmp_nblock = max_block_number + 1;
646 Block_to_dof_map_coarse.resize(tmp_nblock);
648 for (
unsigned i = 0;
i < dof_to_block_map_size;
i++)
650 Block_to_dof_map_coarse[dof_to_block_map[
i]].push_back(
i);
653 Block_to_dof_map_fine.clear();
654 Block_to_dof_map_fine.resize(tmp_nblock);
655 for (
unsigned block_i = 0; block_i < tmp_nblock; block_i++)
658 const unsigned ndof_in_block = Block_to_dof_map_coarse[block_i].size();
659 for (
unsigned dof_i = 0; dof_i < ndof_in_block; dof_i++)
661 const unsigned coarsened_dof_i=Block_to_dof_map_coarse[block_i][dof_i];
666 =Doftype_coarsen_map_fine[coarsened_dof_i];
668 Block_to_dof_map_fine[block_i].insert(
669 Block_to_dof_map_fine[block_i].end(),
685 unsigned tmp_internal_ndof_types = internal_ndof_types();
687 dof_to_block_map.resize(tmp_internal_ndof_types,0);
689 for (
unsigned i = 0;
i < tmp_internal_ndof_types;
i++)
691 dof_to_block_map[
i] =
i;
700 if(is_master_block_preconditioner())
705 unsigned local_nmesh = nmesh();
710 std::ostringstream error_msg;
711 error_msg <<
"Cannot setup blocks because no meshes have been set.";
713 OOMPH_CURRENT_FUNCTION,
714 OOMPH_EXCEPTION_LOCATION);
720 for (
unsigned mesh_i = 0; mesh_i < local_nmesh; mesh_i++)
723 unsigned n_element = mesh_pt(mesh_i)->nelement();
731 =
typeid(*(mesh_pt(mesh_i)->element_pt(0))).name();
735 if(
bool(Allow_multiple_element_type_in_mesh[mesh_i]))
738 unsigned first_element_ndof_type =
739 mesh_pt(mesh_i)->element_pt(0)->ndof_types();
742 for (
unsigned el_i = 1; el_i < n_element; el_i++)
745 unsigned current_element_ndof_type =
746 mesh_pt(mesh_i)->element_pt(el_i)->ndof_types();
750 =
typeid(*(mesh_pt(mesh_i)->element_pt(el_i))).name();
753 if(current_element_ndof_type != first_element_ndof_type)
755 std::ostringstream error_message;
757 <<
"Elements in the same mesh MUST have the same number of types " 759 <<
"The element in mesh " << mesh_i <<
", at position " 761 << current_element_string <<
", \n" 762 <<
"with ndof types: " << current_element_ndof_type <<
".\n" 763 <<
"The first element in the same mesh is: \n" 764 << first_element_string <<
", \n" 765 <<
"with ndof types: " << first_element_ndof_type <<
".\n";
767 OOMPH_CURRENT_FUNCTION,
768 OOMPH_EXCEPTION_LOCATION);
777 for (
unsigned el_i = 1; el_i < n_element; el_i++)
781 =
typeid(*(mesh_pt(mesh_i)->element_pt(el_i))).name();
784 if(current_element_string.compare(first_element_string) != 0)
786 std::ostringstream error_message;
788 <<
"By default, a mesh containing block preconditionable " 789 <<
"elements must contain only one type of element.\n" 790 <<
"The element in mesh " << mesh_i <<
", at position " 791 << el_i <<
" is: \n" << current_element_string <<
"\n" 792 <<
"The first element in the same mesh is: \n" 793 << first_element_string <<
"\n" 794 <<
"If this is correct, consider calling the set_mesh(...) with\n" 795 <<
"the optional argument set true to allow multiple element\n" 796 <<
"types in the same mesh.\n" 797 <<
"Note: A minimal requirement is that the elements in the same\n" 798 <<
"mesh MUST have the same number of DOF types.";
800 OOMPH_CURRENT_FUNCTION,
801 OOMPH_EXCEPTION_LOCATION);
811 this->clear_block_preconditioner_base();
815 unsigned my_rank = comm_pt()->my_rank();
816 unsigned nproc = comm_pt()->nproc();
824 unsigned* nreq_sparse =
new unsigned[nproc]();
825 unsigned* nreq_sparse_for_proc =
new unsigned[nproc]();
826 unsigned** index_in_dof_block_sparse_send =
new unsigned*[nproc]();
827 unsigned** dof_number_sparse_send =
new unsigned*[nproc]();
835 if (is_master_block_preconditioner())
838 Ndof_types_in_mesh.assign(nmesh(),0);
839 for(
unsigned i=0;
i<nmesh();
i++)
841 Ndof_types_in_mesh[
i] = mesh_pt(
i)->ndof_types();
845 if (dynamic_cast<DistributableLinearAlgebraObject*>(matrix_pt()))
847 this->build_distribution
848 (dynamic_cast<DistributableLinearAlgebraObject*>
849 (matrix_pt())->distribution_pt());
854 matrix_pt()->nrow(),
false);
855 this->build_distribution(dist);
857 Nrow = matrix_pt()->nrow();
861 bool matrix_distributed =
862 (this->distribution_pt()->distributed() &&
863 this->distribution_pt()->communicator_pt()->nproc() > 1);
870 if (cr_matrix_pt == 0)
872 std::ostringstream error_message;
873 error_message <<
"Block setup for distributed matrices only works " 874 <<
"for CRDoubleMatrices";
876 OOMPH_CURRENT_FUNCTION,
877 OOMPH_EXCEPTION_LOCATION);
884 unsigned first_row = this->distribution_pt()->first_row();
885 unsigned nrow_local = this->distribution_pt()->nrow_local();
886 unsigned last_row = first_row+nrow_local-1;
894 for (
unsigned p = 0; p < nproc; p++)
896 dense_required_rows(p,0) = this->distribution_pt()->first_row(p);
897 dense_required_rows(p,1) = this->distribution_pt()->first_row(p)
898 +this->distribution_pt()->nrow_local(p) - 1;
905 std::set<unsigned> sparse_global_rows_for_block_lookup;
906 if (matrix_distributed)
908 unsigned nnz = cr_matrix_pt->
nnz();
910 for (
unsigned i = 0;
i < nnz;
i++)
912 unsigned ci = column_index[
i];
913 if (ci<first_row || ci>last_row)
915 sparse_global_rows_for_block_lookup.insert(ci);
920 int nsparse = sparse_global_rows_for_block_lookup.size();
922 Global_index_sparse.resize(0);
923 std::copy(sparse_global_rows_for_block_lookup.begin(),
924 sparse_global_rows_for_block_lookup.end(),
925 std::back_inserter(Global_index_sparse));
927 Index_in_dof_block_sparse.resize(nsparse);
928 Dof_number_sparse.resize(nsparse);
929 sparse_global_rows_for_block_lookup.clear();
932 if (matrix_distributed)
934 MPI_Aint base_displacement_sparse;
935 MPI_Get_address(nreq_sparse,&base_displacement_sparse);
938 for (
unsigned p = 0; p < nproc; p++)
943 for (
int i = 0;
i < nsparse; ++
i)
945 if (Global_index_sparse[
i]<dense_required_rows(p,0))
951 if (Global_index_sparse[
i]<=dense_required_rows(p,1))
963 if (nreq_sparse[p]>0)
968 MPI_Isend(&nreq_sparse[p],1,MPI_UNSIGNED,p,31,
969 comm_pt()->mpi_comm(),&req1);
970 send_requests_sparse.push_back(req1);
974 MPI_Isend(&Global_index_sparse[begin],
975 nreq_sparse[p],MPI_UNSIGNED,p,32,
976 comm_pt()->mpi_comm(),&req2);
977 send_requests_sparse.push_back(req2);
982 MPI_Datatype types[2];
983 MPI_Aint displacements[2];
987 MPI_Type_contiguous(nreq_sparse[p],MPI_UNSIGNED,&types[0]);
988 MPI_Type_commit(&types[0]);
989 MPI_Get_address(&Index_in_dof_block_sparse[begin],&displacements[0]);
990 displacements[0] -= base_displacement_sparse;
994 MPI_Type_contiguous(nreq_sparse[p],MPI_UNSIGNED,&types[1]);
995 MPI_Type_commit(&types[1]);
996 MPI_Get_address(&Dof_number_sparse[begin],&displacements[1]);
997 displacements[1] -= base_displacement_sparse;
1001 MPI_Datatype recv_type;
1002 MPI_Type_create_struct(2,lengths,displacements,types,&recv_type);
1003 MPI_Type_commit(&recv_type);
1004 MPI_Type_free(&types[0]);
1005 MPI_Type_free(&types[1]);
1009 MPI_Irecv(nreq_sparse,1,recv_type,p,33,
1010 comm_pt()->mpi_comm(),&req);
1011 recv_requests_sparse.push_back(req);
1012 MPI_Type_free(&recv_type);
1016 if (nreq_sparse[p]==0)
1019 MPI_Isend(&zero,1,MPI_UNSIGNED,p,31,
1020 comm_pt()->mpi_comm(),&req1);
1021 send_requests_sparse.push_back(req1);
1026 MPI_Irecv(&nreq_sparse_for_proc[p],1,MPI_UNSIGNED,p,31,
1027 comm_pt()->mpi_comm(),&req);
1028 recv_requests_sparse_nreq.push_back(req);
1034 Dof_number_dense.resize(nrow_local);
1035 Index_in_dof_block_dense.resize(nrow_local);
1038 Internal_ndof_types = 0;
1043 Vector<int> previously_assigned_block_number(nrow_local,
1048 bool problem_distributed =
false;
1051 #ifdef OOMPH_HAS_MPI 1052 problem_distributed = any_mesh_distributed();
1056 if (!problem_distributed)
1061 unsigned dof_offset=0;
1064 for (
unsigned m=0;m<nmesh();m++)
1067 unsigned n_element = mesh_pt(m)->nelement();
1071 unsigned ndof_in_element = ndof_types_in_mesh(m);
1072 Internal_ndof_types += ndof_in_element;
1074 for (
unsigned e=0;
e<n_element;
e++)
1078 std::list<std::pair<unsigned long,unsigned> > dof_lookup_list;
1081 mesh_pt(m)->element_pt(
e)->
1082 get_dof_numbers_for_unknowns(dof_lookup_list);
1086 typedef std::list<std::pair<unsigned long,unsigned> >::iterator IT;
1087 for (IT it=dof_lookup_list.begin();
1088 it!=dof_lookup_list.end();it++)
1090 unsigned long global_dof = it->first;
1091 if (global_dof >=
unsigned(first_row) &&
1092 global_dof <= unsigned(last_row))
1094 unsigned dof_number = (it->second)+dof_offset;
1095 Dof_number_dense[global_dof-first_row]=dof_number;
1099 if (previously_assigned_block_number[global_dof-
1102 previously_assigned_block_number[global_dof-first_row]
1115 dof_offset+=ndof_in_element;
1120 for (
unsigned i = 0;
i < nrow_local;
i++)
1122 if (previously_assigned_block_number[
i] < 0)
1124 std::ostringstream error_message;
1125 error_message <<
"Not all degrees of freedom have had DOF type " 1126 <<
"numbers allocated. Dof number " <<
i 1127 <<
" is unallocated.";
1129 OOMPH_CURRENT_FUNCTION,
1130 OOMPH_EXCEPTION_LOCATION);
1138 #ifdef OOMPH_HAS_MPI 1142 unsigned dof_offset=0;
1146 std::map<unsigned long,unsigned > my_dof_map;
1149 for (
unsigned m=0;m<nmesh();m++)
1152 unsigned n_element = this->mesh_pt(m)->nelement();
1156 unsigned ndof_in_element = ndof_types_in_mesh(m);
1157 Internal_ndof_types += ndof_in_element;
1160 for (
unsigned e=0;
e<n_element;
e++)
1164 if (!this->mesh_pt(m)->element_pt(
e)->is_halo())
1168 std::list<std::pair<unsigned long,unsigned> > dof_lookup_list;
1172 this->mesh_pt(m)->element_pt(
e)->
1173 get_dof_numbers_for_unknowns(dof_lookup_list);
1177 std::list<std::pair<unsigned long,unsigned> >::iterator IT;
1178 for (IT it=dof_lookup_list.begin();
1179 it!=dof_lookup_list.end();it++)
1181 it->second = (it->second)+dof_offset;
1182 my_dof_map[it->first] = it->second;
1192 dof_offset+=ndof_in_element;
1196 unsigned my_ndof = my_dof_map.size();
1197 unsigned long* my_global_dofs =
new unsigned long[my_ndof];
1198 unsigned* my_dof_numbers =
new unsigned[my_ndof];
1200 std::map<unsigned long,unsigned >::iterator IT;
1202 for (IT it = my_dof_map.begin(); it != my_dof_map.end(); it++)
1204 my_global_dofs[pt] = it->first;
1205 my_dof_numbers[pt] = it->second;
1213 int* first_dof_to_send =
new int[nproc];
1214 int* ndof_to_send =
new int[nproc];
1216 for (
unsigned p = 0; p < nproc; p++)
1218 first_dof_to_send[p] = 0;
1219 ndof_to_send[p] = 0;
1220 while (ptr < my_ndof && my_global_dofs[ptr] < dense_required_rows(p,0))
1224 first_dof_to_send[p] = ptr;
1225 while (ptr < my_ndof && my_global_dofs[ptr] <= dense_required_rows(p,1))
1233 int* ndof_to_recv =
new int[nproc];
1234 MPI_Alltoall(ndof_to_send,1,MPI_INT,ndof_to_recv,1,MPI_INT,
1235 comm_pt()->mpi_comm());
1238 MPI_Aint base_displacement;
1239 MPI_Get_address(my_global_dofs,&base_displacement);
1244 std::vector<bool> dof_recv(nrow_local,
false);
1253 for (
unsigned p = 0; p < nproc; p++)
1259 if (ndof_to_send[p] > 0)
1262 MPI_Datatype types[2];
1263 MPI_Aint displacements[2];
1267 MPI_Type_contiguous(ndof_to_send[p],MPI_UNSIGNED_LONG,&types[0]);
1268 MPI_Type_commit(&types[0]);
1269 MPI_Get_address(my_global_dofs + first_dof_to_send[p],
1271 displacements[0] -= base_displacement;
1275 MPI_Type_contiguous(ndof_to_send[p],MPI_UNSIGNED,&types[1]);
1276 MPI_Type_commit(&types[1]);
1277 MPI_Get_address(my_dof_numbers + first_dof_to_send[p],
1279 displacements[1] -= base_displacement;
1283 MPI_Datatype send_type;
1284 MPI_Type_create_struct(2,lengths,displacements,types,&send_type);
1285 MPI_Type_commit(&send_type);
1286 MPI_Type_free(&types[0]);
1287 MPI_Type_free(&types[1]);
1291 MPI_Isend(my_global_dofs,1,send_type,p,2,
1292 comm_pt()->mpi_comm(),&req);
1293 send_requests.push_back(req);
1294 MPI_Type_free(&send_type);
1298 if (ndof_to_recv[p] > 0)
1301 global_dofs_recv[p] =
new unsigned long[ndof_to_recv[p]];
1302 dof_numbers_recv[p] =
new unsigned[ndof_to_recv[p]];
1306 MPI_Datatype types[2];
1307 MPI_Aint displacements[2];
1311 MPI_Type_contiguous(ndof_to_recv[p],MPI_UNSIGNED_LONG,&types[0]);
1312 MPI_Type_commit(&types[0]);
1313 MPI_Get_address(global_dofs_recv[p],&displacements[0]);
1314 displacements[0] -= base_displacement;
1318 MPI_Type_contiguous(ndof_to_recv[p],MPI_UNSIGNED,&types[1]);
1319 MPI_Type_commit(&types[1]);
1320 MPI_Get_address(dof_numbers_recv[p],&displacements[1]);
1321 displacements[1] -= base_displacement;
1325 MPI_Datatype recv_type;
1326 MPI_Type_create_struct(2,lengths,displacements,types,&recv_type);
1327 MPI_Type_commit(&recv_type);
1328 MPI_Type_free(&types[0]);
1329 MPI_Type_free(&types[1]);
1333 MPI_Irecv(my_global_dofs,1,recv_type,p,2,
1334 comm_pt()->mpi_comm(),&req);
1335 recv_requests.push_back(req);
1336 MPI_Type_free(&recv_type);
1343 unsigned u = first_dof_to_send[p] + ndof_to_recv[p];
1344 for (
unsigned i = first_dof_to_send[p];
i < u;
i++)
1348 dof_recv[my_global_dofs[
i]-first_row] =
true;
1350 Dof_number_dense[my_global_dofs[
i]-first_row] =
1357 unsigned c_recv = recv_requests.size();
1363 MPI_Waitany(c_recv,&recv_requests[0],&req_number,MPI_STATUS_IGNORE);
1364 recv_requests.erase(recv_requests.begin()+req_number);
1368 unsigned p = proc[req_number];
1369 proc.erase(proc.begin()+req_number);
1372 for (
int i = 0;
i < ndof_to_recv[p];
i++)
1376 dof_recv[global_dofs_recv[p][
i]-first_row] =
true;
1378 Dof_number_dense[global_dofs_recv[p][
i]-first_row]
1379 = dof_numbers_recv[p][
i];
1383 delete[] global_dofs_recv[p];
1384 delete[] dof_numbers_recv[p];
1389 unsigned csr = send_requests.size();
1392 MPI_Waitall(csr,&send_requests[0],MPI_STATUS_IGNORE);
1396 delete[] ndof_to_send;
1397 delete[] first_dof_to_send;
1398 delete[] ndof_to_recv;
1399 delete[] my_global_dofs;
1400 delete[] my_dof_numbers;
1402 unsigned all_recv =
true;
1403 for (
unsigned i = 0;
i < nrow_local;
i++)
1412 std::ostringstream error_message;
1413 error_message <<
"Not all the DOF numbers required were received";
1415 OOMPH_CURRENT_FUNCTION,
1416 OOMPH_EXCEPTION_LOCATION);
1420 std::ostringstream error_message;
1422 <<
"The problem appears to be distributed, MPI is required";
1424 OOMPH_CURRENT_FUNCTION,
1425 OOMPH_EXCEPTION_LOCATION);
1428 #ifdef OOMPH_HAS_MPI 1431 if (matrix_distributed)
1435 if (recv_requests_sparse_nreq.size()>0)
1437 MPI_Waitall(recv_requests_sparse_nreq.size(),
1438 &recv_requests_sparse_nreq[0],
1441 for (
unsigned p = 0; p < nproc; ++p)
1443 if (nreq_sparse_for_proc[p] > 0)
1446 sparse_rows_for_proc[p] =
new unsigned[nreq_sparse_for_proc[p]];
1447 MPI_Irecv(sparse_rows_for_proc[p],nreq_sparse_for_proc[p],
1449 comm_pt()->mpi_comm(),&req);
1450 sparse_rows_for_proc_requests.push_back(req);
1461 Dof_dimension.assign(Internal_ndof_types,0);
1464 if (!matrix_distributed)
1467 unsigned nrow = this->distribution_pt()->nrow();
1468 Index_in_dof_block_dense.resize(nrow);
1470 for (
unsigned i = 0;
i < nrow;
i++)
1472 Index_in_dof_block_dense[
i] = Dof_dimension[Dof_number_dense[
i]];
1473 Dof_dimension[Dof_number_dense[
i]]++;
1480 #ifdef OOMPH_HAS_MPI 1485 unsigned* my_nrows_in_dof_block =
new unsigned[Internal_ndof_types];
1486 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1488 my_nrows_in_dof_block[
i] = 0;
1490 for (
unsigned i = 0;
i < nrow_local;
i++)
1492 my_nrows_in_dof_block[Dof_number_dense[
i]]++;
1496 unsigned* nrow_in_dof_block_recv =
new unsigned[Internal_ndof_types*nproc];
1497 MPI_Allgather(my_nrows_in_dof_block,Internal_ndof_types,MPI_UNSIGNED,
1498 nrow_in_dof_block_recv,Internal_ndof_types,MPI_UNSIGNED,
1499 comm_pt()->mpi_comm());
1500 delete[] my_nrows_in_dof_block;
1504 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1506 for (
unsigned p = 0; p < my_rank; p++)
1508 my_first_dof_index[
i] += nrow_in_dof_block_recv[p*Internal_ndof_types +
i];
1510 Dof_dimension[
i] = my_first_dof_index[
i];
1511 for (
unsigned p = my_rank; p < nproc; p++)
1513 Dof_dimension[
i] += nrow_in_dof_block_recv[p*Internal_ndof_types +
i];
1516 delete[] nrow_in_dof_block_recv;
1519 Index_in_dof_block_dense.resize(nrow_local);
1522 for (
unsigned i = 0;
i < nrow_local;
i++)
1524 Index_in_dof_block_dense[
i] =
1525 my_first_dof_index[Dof_number_dense[
i]] +
1526 dof_counter[Dof_number_dense[
i]];
1527 dof_counter[Dof_number_dense[
i]]++;
1531 if (sparse_rows_for_proc_requests.size()>0)
1533 MPI_Waitall(sparse_rows_for_proc_requests.size(),
1534 &sparse_rows_for_proc_requests[0],
1537 MPI_Aint base_displacement;
1538 MPI_Get_address(dof_number_sparse_send,&base_displacement);
1539 unsigned first_row = this->distribution_pt()->first_row();
1540 for (
unsigned p = 0; p < nproc; ++p)
1542 if (nreq_sparse_for_proc[p]>0)
1545 index_in_dof_block_sparse_send[p] =
1546 new unsigned[nreq_sparse_for_proc[p]];
1547 dof_number_sparse_send[p] =
1548 new unsigned[nreq_sparse_for_proc[p]];
1549 for (
unsigned i = 0;
i < nreq_sparse_for_proc[p]; ++
i)
1551 unsigned r = sparse_rows_for_proc[p][
i];
1553 index_in_dof_block_sparse_send[p][
i]
1554 = Index_in_dof_block_dense[r];
1555 dof_number_sparse_send[p][
i]
1556 = Dof_number_dense[r];
1558 delete[] sparse_rows_for_proc[p];
1562 MPI_Datatype types[2];
1563 MPI_Aint displacements[2];
1567 MPI_Type_contiguous(nreq_sparse_for_proc[p],MPI_UNSIGNED,&types[0]);
1568 MPI_Type_commit(&types[0]);
1569 MPI_Get_address(index_in_dof_block_sparse_send[p],&displacements[0]);
1570 displacements[0] -= base_displacement;
1574 MPI_Type_contiguous(nreq_sparse_for_proc[p],MPI_UNSIGNED,&types[1]);
1575 MPI_Type_commit(&types[1]);
1576 MPI_Get_address(dof_number_sparse_send[p],&displacements[1]);
1577 displacements[1] -= base_displacement;
1581 MPI_Datatype send_type;
1582 MPI_Type_create_struct(2,lengths,displacements,types,&send_type);
1583 MPI_Type_commit(&send_type);
1584 MPI_Type_free(&types[0]);
1585 MPI_Type_free(&types[1]);
1589 MPI_Isend(dof_number_sparse_send,1,send_type,p,33,
1590 comm_pt()->mpi_comm(),&req);
1591 send_requests_sparse.push_back(req);
1592 MPI_Type_free(&send_type);
1596 index_in_dof_block_sparse_send[p] = 0;
1597 dof_number_sparse_send[p] = 0;
1612 if (dof_to_block_map.size() != Internal_ndof_types)
1614 std::ostringstream error_message;
1616 <<
"The dof_to_block_map vector (size=" 1617 << dof_to_block_map.size() <<
") must be of size Internal_ndof_types=" 1618 << Internal_ndof_types;
1620 error_message.str(),
1621 OOMPH_CURRENT_FUNCTION,
1622 OOMPH_EXCEPTION_LOCATION);
1627 unsigned max_block_number = 0;
1628 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1630 if (dof_to_block_map[
i] > max_block_number)
1632 max_block_number = dof_to_block_map[
i];
1637 Block_number_to_dof_number_lookup.clear();
1638 Block_number_to_dof_number_lookup.resize(max_block_number+1);
1639 Ndof_in_block.clear();
1640 Ndof_in_block.resize(max_block_number+1);
1643 Dof_number_to_block_number_lookup.resize(Internal_ndof_types);
1646 for (
unsigned i = 0;
i < Internal_ndof_types;
i++)
1648 Dof_number_to_block_number_lookup[
i] = dof_to_block_map[
i];
1649 Block_number_to_dof_number_lookup[dof_to_block_map[
i]].push_back(
i);
1650 Ndof_in_block[dof_to_block_map[
i]]++;
1656 for (
unsigned i = 0;
i < max_block_number+1;
i++)
1658 if (Block_number_to_dof_number_lookup[
i].size() == 0)
1660 std::ostringstream error_message;
1661 error_message <<
"block number " <<
i 1662 <<
" does not have any DOFs associated with it";
1664 error_message.str(),
1665 OOMPH_CURRENT_FUNCTION,
1666 OOMPH_EXCEPTION_LOCATION);
1672 Internal_nblock_types = max_block_number+1;
1675 bool distributed = this->master_distribution_pt()->distributed();
1678 Internal_block_distribution_pt.resize(Internal_nblock_types);
1679 for (
unsigned i = 0;
i < Internal_nblock_types;
i++)
1681 unsigned block_dim = 0;
1682 for (
unsigned j = 0; j < Ndof_in_block[
i]; j++)
1685 internal_dof_block_dimension(Block_number_to_dof_number_lookup[
i][j]);
1687 Internal_block_distribution_pt[
i] =
new 1689 block_dim,distributed);
1697 if(is_subsidiary_block_preconditioner())
1700 const unsigned dof_block_distribution_size
1701 = Dof_block_distribution_pt.size();
1702 for (
unsigned dof_i = 0; dof_i < dof_block_distribution_size; dof_i++)
1704 delete Dof_block_distribution_pt[dof_i];
1706 const unsigned ndofs = this->ndof_types();
1707 Dof_block_distribution_pt.resize(ndofs,0);
1711 for (
unsigned dof_i = 0; dof_i < ndofs; dof_i++)
1715 const unsigned ncoarsened_dofs_in_dof_i =
1716 Doftype_coarsen_map_coarse[dof_i].size();
1718 tmp_dist_pt(ncoarsened_dofs_in_dof_i,0);
1719 for (
unsigned parent_dof_i=0;parent_dof_i<ncoarsened_dofs_in_dof_i;
1722 tmp_dist_pt[parent_dof_i]
1723 = master_block_preconditioner_pt()
1724 ->dof_block_distribution_pt(
1725 Doftype_in_master_preconditioner_coarse[
1726 Doftype_coarsen_map_coarse[dof_i][parent_dof_i] ] );
1733 *Dof_block_distribution_pt[
1745 unsigned n_existing_block_dist
1746 = Block_distribution_pt.size();
1747 for (
unsigned dist_i = 0; dist_i < n_existing_block_dist; dist_i++)
1749 delete Block_distribution_pt[dist_i];
1752 Block_distribution_pt.clear();
1755 unsigned super_block_size = Block_to_dof_map_coarse.size();
1756 Block_distribution_pt.resize(super_block_size,0);
1757 for (
unsigned super_block_i = 0;
1758 super_block_i < super_block_size; super_block_i++)
1760 unsigned sub_block_size = Block_to_dof_map_coarse[super_block_i].size();
1763 for (
unsigned sub_block_i = 0;
1764 sub_block_i < sub_block_size; sub_block_i++)
1766 tmp_dist_pt[sub_block_i]
1767 = dof_block_distribution_pt(
1768 Block_to_dof_map_coarse[super_block_i][sub_block_i]);
1771 Block_distribution_pt[super_block_i]
1775 tmp_dist_pt,*Block_distribution_pt[super_block_i]);
1791 if (is_subsidiary_block_preconditioner())
1793 this->build_distribution(dist);
1797 Internal_preconditioner_matrix_distribution_pt =
new 1803 concatenate(Block_distribution_pt,*Preconditioner_matrix_distribution_pt);
1811 const unsigned nblocks = Block_distribution_pt.size();
1813 for (
unsigned i = 0;
i < nblocks;
i++)
1815 preconditioner_matrix_key[
i] =
i;
1821 =Auxiliary_block_distribution_pt.begin();
1822 while(iter!=Auxiliary_block_distribution_pt.end())
1824 if(iter->first!=preconditioner_matrix_key)
1826 delete iter->second;
1836 Auxiliary_block_distribution_pt.
clear();
1839 insert_auxiliary_block_distribution(preconditioner_matrix_key,
1840 Preconditioner_matrix_distribution_pt);
1844 #ifdef OOMPH_HAS_MPI 1845 if (send_requests_sparse.size()>0)
1847 MPI_Waitall(send_requests_sparse.size(),
1848 &send_requests_sparse[0],MPI_STATUS_IGNORE);
1850 if (recv_requests_sparse.size()>0)
1852 MPI_Waitall(recv_requests_sparse.size(),
1853 &recv_requests_sparse[0],MPI_STATUS_IGNORE);
1855 for (
unsigned p = 0; p < nproc; p++)
1857 delete[] index_in_dof_block_sparse_send[p];
1858 delete[] dof_number_sparse_send[p];
1860 delete[] index_in_dof_block_sparse_send;
1861 delete[] dof_number_sparse_send;
1862 delete[] nreq_sparse;
1863 delete[] nreq_sparse_for_proc;
1872 Global_index.resize(Internal_nblock_types);
1873 for (
unsigned b = 0; b < Internal_nblock_types; b++)
1875 Global_index[b].resize(Internal_block_distribution_pt[b]->nrow());
1879 unsigned nrow=this->master_nrow();
1880 for (
unsigned i=0;
i<nrow;
i++)
1883 int dof_number=this->internal_dof_number(
i);
1888 unsigned block_number = Dof_number_to_block_number_lookup[dof_number];
1891 unsigned index_in_block=0;
1893 while (
int(Block_number_to_dof_number_lookup[block_number][ptr])
1897 internal_dof_block_dimension(Block_number_to_dof_number_lookup[
1902 index_in_block+=internal_index_in_dof(
i);
1903 Global_index[block_number][index_in_block]=
i;
1910 #ifdef OOMPH_HAS_MPI 1914 this->master_distribution_pt();
1917 Nrows_to_send_for_get_block.resize(Internal_nblock_types,nproc);
1918 Nrows_to_send_for_get_block.initialise(0);
1919 Nrows_to_send_for_get_ordered.resize(nproc);
1920 Nrows_to_send_for_get_ordered.initialise(0);
1923 unsigned nrow_local = master_distribution_pt->
nrow_local();
1924 unsigned first_row = master_distribution_pt->
first_row();
1925 for (
unsigned i = 0;
i < nrow_local;
i++)
1929 int b = this->internal_block_number(first_row +
i);
1935 unsigned j = this->internal_index_in_block(first_row +
i);
1938 unsigned block_p = 0;
1939 while(!(Internal_block_distribution_pt[b]->first_row(block_p) <= j &&
1940 (Internal_block_distribution_pt[b]->first_row(block_p) +
1941 Internal_block_distribution_pt[b]->nrow_local(block_p) > j)))
1947 Nrows_to_send_for_get_block(b,block_p)++;
1948 Nrows_to_send_for_get_ordered[block_p]++;
1953 Nrows_to_recv_for_get_block.resize(Internal_nblock_types,nproc);
1954 Nrows_to_recv_for_get_block.initialise(0);
1955 Nrows_to_recv_for_get_ordered.resize(nproc);
1956 Nrows_to_recv_for_get_ordered.initialise(0);
1964 for (
unsigned p = 0; p < nproc; p++)
1970 nrows_to_send[p] =
new unsigned[Internal_nblock_types];
1971 for (
unsigned b = 0; b < Internal_nblock_types; b++)
1973 nrows_to_send[p][b] =
1974 Nrows_to_send_for_get_block(b,p);
1977 MPI_Isend(nrows_to_send[p],Internal_nblock_types,MPI_UNSIGNED,p,3,
1978 comm_pt()->mpi_comm(),&s_req);
1979 send_requests_nrow.push_back(s_req);
1982 nrows_to_recv[p] =
new unsigned[Internal_nblock_types];
1984 MPI_Irecv(nrows_to_recv[p],Internal_nblock_types,MPI_UNSIGNED,p,3,
1985 comm_pt()->mpi_comm(),&r_req);
1986 recv_requests_nrow.push_back(r_req);
1991 for (
unsigned b = 0; b < Internal_nblock_types; b++)
1993 Nrows_to_recv_for_get_block(b,p) =
1994 Nrows_to_send_for_get_block(b,p);
1996 Nrows_to_recv_for_get_ordered[p] = Nrows_to_send_for_get_ordered[p];
2006 Rows_to_send_for_get_block.resize(Internal_nblock_types,nproc);
2007 Rows_to_send_for_get_block.initialise(0);
2008 Rows_to_send_for_get_ordered.resize(nproc);
2009 Rows_to_send_for_get_ordered.initialise(0);
2010 Rows_to_recv_for_get_block.resize(Internal_nblock_types,nproc);
2011 Rows_to_recv_for_get_block.initialise(0);
2014 for (
unsigned p = 0; p < nproc; p++)
2016 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2018 Rows_to_send_for_get_block(b,p)
2019 =
new int[Nrows_to_send_for_get_block(b,p)];
2022 block_rows_to_send(b,p)
2023 =
new int[Nrows_to_send_for_get_block(b,p)];
2027 Rows_to_recv_for_get_block(b,p)
2028 =
new int[Nrows_to_send_for_get_block(b,p)];
2031 Rows_to_send_for_get_ordered[p]
2032 =
new int [Nrows_to_send_for_get_ordered[p]];
2039 for (
unsigned i = 0;
i < nrow_local;
i++)
2042 int b = this->internal_block_number(first_row +
i);
2049 unsigned j = this->internal_index_in_block(first_row +
i);
2052 unsigned block_p = 0;
2053 while(!(Internal_block_distribution_pt[b]->first_row(block_p) <= j &&
2054 (Internal_block_distribution_pt[b]->first_row(block_p) +
2055 Internal_block_distribution_pt[b]->nrow_local(block_p) > j)))
2061 Rows_to_send_for_get_block(b,block_p)[ptr_block(b,block_p)] =
i;
2062 if (block_p != my_rank)
2064 block_rows_to_send(b,block_p)[ptr_block(b,block_p)]
2065 = j - Internal_block_distribution_pt[b]->first_row(block_p);
2069 Rows_to_recv_for_get_block(b,block_p)[ptr_block(b,block_p)]
2070 = j - Internal_block_distribution_pt[b]->first_row(block_p);
2072 ptr_block(b,block_p)++;
2077 for (
unsigned p = 0; p < nproc; ++p)
2080 for (
unsigned b = 0; b < Internal_nblock_types; ++b)
2083 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(b,p); ++
i)
2085 Rows_to_send_for_get_ordered[p][pt] =
2086 Rows_to_send_for_get_block(b,p)[
i];
2095 unsigned c = recv_requests_nrow.size();
2101 MPI_Waitany(c,&recv_requests_nrow[0],&req_number,MPI_STATUS_IGNORE);
2102 recv_requests_nrow.erase(recv_requests_nrow.begin()+req_number);
2106 unsigned p = proc[req_number];
2107 proc.erase(proc.begin()+req_number);
2110 Nrows_to_recv_for_get_ordered[p]=0;
2111 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2113 Nrows_to_recv_for_get_block(b,p) = nrows_to_recv[p][b];
2114 Nrows_to_recv_for_get_ordered[p] += nrows_to_recv[p][b];
2118 delete[] nrows_to_recv[p];
2122 Rows_to_recv_for_get_ordered.resize(nproc,0);
2123 for (
unsigned p = 0; p < nproc; p++)
2127 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2129 Rows_to_recv_for_get_block(b,p)
2130 =
new int[Nrows_to_recv_for_get_block(b,p)];
2139 for (
unsigned p = 0; p < nproc; p++)
2143 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2145 if (Nrows_to_send_for_get_block(b,p) > 0)
2147 nsend_for_rows[p]++;
2149 if (Nrows_to_recv_for_get_block(b,p) > 0)
2151 nrecv_for_rows[p]++;
2158 MPI_Aint base_displacement;
2159 MPI_Get_address(matrix_pt(),&base_displacement);
2161 for (
unsigned p = 0; p < nproc; p++)
2166 if (nsend_for_rows[p] > 0)
2168 MPI_Datatype send_types[nsend_for_rows[p]];
2169 MPI_Aint send_displacements[nsend_for_rows[p]];
2170 int send_sz[nsend_for_rows[p]];
2171 unsigned send_ptr = 0;
2172 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2174 if (Nrows_to_send_for_get_block(b,p) > 0)
2176 MPI_Type_contiguous(Nrows_to_send_for_get_block(b,p),
2177 MPI_INT,&send_types[send_ptr]);
2178 MPI_Type_commit(&send_types[send_ptr]);
2179 MPI_Get_address(block_rows_to_send(b,p),
2180 &send_displacements[send_ptr]);
2181 send_displacements[send_ptr] -= base_displacement;
2182 send_sz[send_ptr] = 1;
2186 MPI_Datatype final_send_type;
2187 MPI_Type_create_struct(nsend_for_rows[p],send_sz,send_displacements,
2188 send_types,&final_send_type);
2189 MPI_Type_commit(&final_send_type);
2190 for (
unsigned i = 0;
i < nsend_for_rows[p];
i++)
2192 MPI_Type_free(&send_types[
i]);
2194 MPI_Request send_req;
2195 MPI_Isend(matrix_pt(),1,final_send_type,p,4,
2196 comm_pt()->mpi_comm(),&send_req);
2197 req_rows.push_back(send_req);
2198 MPI_Type_free(&final_send_type);
2202 if (nrecv_for_rows[p] > 0)
2204 MPI_Datatype recv_types[nrecv_for_rows[p]];
2205 MPI_Aint recv_displacements[nrecv_for_rows[p]];
2206 int recv_sz[nrecv_for_rows[p]];
2207 unsigned recv_ptr = 0;
2208 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2210 if (Nrows_to_recv_for_get_block(b,p) > 0)
2212 MPI_Type_contiguous(Nrows_to_recv_for_get_block(b,p),
2213 MPI_INT,&recv_types[recv_ptr]);
2214 MPI_Type_commit(&recv_types[recv_ptr]);
2215 MPI_Get_address(Rows_to_recv_for_get_block(b,p),
2216 &recv_displacements[recv_ptr]);
2217 recv_displacements[recv_ptr] -= base_displacement;
2218 recv_sz[recv_ptr] = 1;
2222 MPI_Datatype final_recv_type;
2223 MPI_Type_create_struct(nrecv_for_rows[p],recv_sz,recv_displacements,
2224 recv_types,&final_recv_type);
2225 MPI_Type_commit(&final_recv_type);
2226 for (
unsigned i = 0;
i < nrecv_for_rows[p];
i++)
2228 MPI_Type_free(&recv_types[
i]);
2230 MPI_Request recv_req;
2231 MPI_Irecv(matrix_pt(),1,final_recv_type,p,4,
2232 comm_pt()->mpi_comm(),&recv_req);
2233 req_rows.push_back(recv_req);
2234 MPI_Type_free(&final_recv_type);
2244 unsigned n_req_rows = req_rows.size();
2247 MPI_Waitall(n_req_rows,&req_rows[0],MPI_STATUS_IGNORE);
2251 Rows_to_recv_for_get_ordered.resize(nproc);
2252 Rows_to_recv_for_get_ordered.initialise(0);
2256 for (
unsigned b = 1; b < Internal_nblock_types; ++b)
2258 vec_offset[b]=vec_offset[b-1]+Internal_block_distribution_pt[b-1]->nrow_local();
2262 for (
unsigned p = 0; p < nproc; p++)
2265 Rows_to_recv_for_get_ordered[p]
2266 =
new int[Nrows_to_recv_for_get_ordered[p]];
2267 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2269 for (
unsigned i = 0;
i < Nrows_to_recv_for_get_block(b,p);
i++)
2271 Rows_to_recv_for_get_ordered[p][pt] =
2272 Rows_to_recv_for_get_block(b,p)[
i]+vec_offset[b];
2279 for (
unsigned p = 0; p < nproc; p++)
2283 for (
unsigned b = 0; b < Internal_nblock_types; b++)
2285 delete[] block_rows_to_send(b,p);
2287 if (Nrows_to_send_for_get_ordered[p] > 0)
2289 delete[] ordered_rows_to_send[p];
2295 unsigned n_req_send_nrow = send_requests_nrow.size();
2296 if (n_req_send_nrow)
2298 MPI_Waitall(n_req_send_nrow,&send_requests_nrow[0],MPI_STATUS_IGNORE);
2300 for (
unsigned p = 0; p < nproc; p++)
2302 delete[] nrows_to_send[p];
2308 if(block_output_on())
2309 output_blocks_to_files(Output_base_filename);
2335 unsigned doftype_in_master_preconditioner_coarse_size
2336 = doftype_in_master_preconditioner_coarse.size();
2338 for (
unsigned dof_i = 0; dof_i
2339 < doftype_in_master_preconditioner_coarse_size; dof_i++)
2344 doftype_coarsen_map_coarse.push_back(tmp_vec);
2348 turn_into_subsidiary_block_preconditioner(
2349 master_block_prec_pt,
2350 doftype_in_master_preconditioner_coarse,
2351 doftype_coarsen_map_coarse);
2415 Master_block_preconditioner_pt = master_block_prec_pt;
2418 Doftype_coarsen_map_coarse = doftype_coarsen_map_coarse;
2420 Doftype_in_master_preconditioner_coarse =
2421 doftype_in_master_preconditioner_coarse;
2436 template<
typename MATRIX>
2443 if (this->is_master_block_preconditioner())
2445 std::ostringstream err_msg;
2449 err_msg <<
"No meshes have been set for this block preconditioner!\n" 2450 <<
"Set one with set_nmesh(...), set_mesh(...)" << std::endl;
2452 OOMPH_CURRENT_FUNCTION,
2453 OOMPH_EXCEPTION_LOCATION);
2454 for (
unsigned m=0;m<n;m++)
2458 err_msg <<
"The mesh pointer to mesh " << m <<
" is null!\n" 2459 <<
"Set a non-null one with set_mesh(...)" << std::endl;
2461 OOMPH_CURRENT_FUNCTION,
2462 OOMPH_EXCEPTION_LOCATION);
2471 unsigned internal_n_dof_types = ndof_types();
2476 for (
unsigned i = 0;
i < internal_n_dof_types;
i++)
2478 dof_to_block_lookup[
i] =
i;
2482 this->block_setup(dof_to_block_lookup);
2500 const unsigned n_block_types = nblock_types();
2504 if ((required_blocks.
nrow() != n_block_types) ||
2505 (required_blocks.
ncol() != n_block_types))
2508 std::ostringstream error_message;
2509 error_message <<
"The size of the matrix of bools required_blocks " 2510 <<
"(which indicates which blocks are required) is not the " 2511 <<
"right size, required_blocks is " 2512 << required_blocks.
ncol()
2513 <<
" x " << required_blocks.
nrow() <<
", whereas it should " 2514 <<
"be " << n_block_types <<
" x " << n_block_types;
2516 OOMPH_CURRENT_FUNCTION,
2517 OOMPH_EXCEPTION_LOCATION);
2521 if ((block_matrix_pt.
nrow() != n_block_types) ||
2522 (block_matrix_pt.
ncol() != n_block_types))
2524 std::ostringstream error_message;
2525 error_message <<
"The size of the block matrix pt is not the " 2526 <<
"right size, block_matrix_pt is " 2527 << block_matrix_pt.
ncol()
2528 <<
" x " << block_matrix_pt.
nrow() <<
", whereas it should " 2529 <<
"be " << n_block_types <<
" x " << n_block_types;
2531 OOMPH_CURRENT_FUNCTION,
2532 OOMPH_EXCEPTION_LOCATION);
2538 for (
unsigned i = 0;
i < n_block_types;
i++)
2540 for (
unsigned j = 0; j < n_block_types; j++)
2543 if (required_blocks(
i,j))
2546 block_matrix_pt(
i,j) =
new MATRIX;
2547 get_block(
i, j, *block_matrix_pt(
i,j));
2553 block_matrix_pt(
i,j) = 0;
2577 std::ostringstream err_msg;
2578 err_msg <<
"The distribution of the global vector v must be setup.";
2580 OOMPH_CURRENT_FUNCTION,
2581 OOMPH_EXCEPTION_LOCATION);
2590 std::ostringstream err_msg;
2591 err_msg <<
"The distribution of the global vector v must match the " 2592 <<
" specified master_distribution_pt(). \n" 2593 <<
"i.e. Distribution_pt in the master preconditioner";
2595 OOMPH_CURRENT_FUNCTION,
2596 OOMPH_EXCEPTION_LOCATION);
2601 const unsigned para_nblock_types = nblock_types();
2602 const unsigned para_block_vec_number_size = block_vec_number.size();
2603 if(para_block_vec_number_size > para_nblock_types)
2605 std::ostringstream err_msg;
2606 err_msg <<
"You have requested " << para_block_vec_number_size
2607 <<
" number of blocks, (block_vec_number.size() is " 2608 << para_block_vec_number_size <<
").\n" 2609 <<
"But there are only " << para_nblock_types <<
" nblock_types.\n" 2610 <<
"Please make sure that block_vec_number is correctly sized.\n";
2612 OOMPH_CURRENT_FUNCTION,
2613 OOMPH_EXCEPTION_LOCATION);
2620 for (
unsigned i = 0;
i < para_block_vec_number_size;
i++)
2622 const unsigned para_required_block = block_vec_number[
i];
2623 if(para_required_block >= para_nblock_types)
2625 std::ostringstream err_msg;
2626 err_msg <<
"block_vec_number[" <<
i <<
"] is " << para_required_block
2628 <<
"But there are only " << para_nblock_types
2629 <<
" nblock_types.\n";
2631 OOMPH_CURRENT_FUNCTION,
2632 OOMPH_EXCEPTION_LOCATION);
2637 std::set<unsigned> para_set;
2638 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2640 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
2641 para_set_ret = para_set.insert(block_vec_number[b]);
2643 if(!para_set_ret.second)
2645 std::ostringstream err_msg;
2646 err_msg <<
"Error: the block number " 2647 << block_vec_number[b]
2648 <<
" appears twice.\n";
2650 OOMPH_CURRENT_FUNCTION,
2651 OOMPH_EXCEPTION_LOCATION);
2657 const unsigned n_block = block_vec_number.size();
2667 for (
unsigned b = 0; b < n_block; b++)
2669 const unsigned mapped_b = block_vec_number[b];
2670 most_fine_grain_dof.insert(
2671 most_fine_grain_dof.end(),
2672 Block_to_dof_map_fine[mapped_b].begin(),
2673 Block_to_dof_map_fine[mapped_b].end());
2678 internal_get_block_vectors(most_fine_grain_dof,
2679 v,dof_block_vector);
2690 std::map<Vector<unsigned>,
2695 iter = Auxiliary_block_distribution_pt.find(block_vec_number);
2697 if(iter != Auxiliary_block_distribution_pt.end())
2701 w.
build(iter->second);
2708 for (
unsigned b = 0; b < n_block; b++)
2710 tmp_vec_dist_pt[b] = Block_distribution_pt[block_vec_number[b]];
2720 insert_auxiliary_block_distribution(block_vec_number,tmp_dist_pt);
2723 w.
build(tmp_dist_pt);
2728 dof_block_vector,w);
2750 std::ostringstream err_msg;
2751 err_msg <<
"The distribution of the global vector v must be setup.";
2753 OOMPH_CURRENT_FUNCTION,
2754 OOMPH_EXCEPTION_LOCATION);
2763 std::ostringstream err_msg;
2764 err_msg <<
"The distribution of the global vector v must match the " 2765 <<
" specified master_distribution_pt(). \n" 2766 <<
"i.e. Distribution_pt in the master preconditioner";
2768 OOMPH_CURRENT_FUNCTION,
2769 OOMPH_EXCEPTION_LOCATION);
2774 const unsigned para_block_vec_number_size = block_vec_number.size();
2775 const unsigned para_n_block = nblock_types();
2776 if(para_block_vec_number_size > para_n_block)
2778 std::ostringstream err_msg;
2779 err_msg <<
"Trying to return " << para_block_vec_number_size
2780 <<
" block vectors.\n" 2781 <<
"But there are only " << para_n_block <<
" block types.\n";
2783 OOMPH_CURRENT_FUNCTION,
2784 OOMPH_EXCEPTION_LOCATION);
2791 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2793 const unsigned para_required_block = block_vec_number[b];
2794 if(para_required_block > para_n_block)
2796 std::ostringstream err_msg;
2797 err_msg <<
"block_vec_number[" << b <<
"] is " << para_required_block
2799 <<
"But there are only " << para_n_block <<
" block types.\n";
2801 OOMPH_CURRENT_FUNCTION,
2802 OOMPH_EXCEPTION_LOCATION);
2808 std::set<unsigned> para_set;
2809 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2811 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
2812 para_set_ret = para_set.insert(block_vec_number[b]);
2814 if(!para_set_ret.second)
2816 std::ostringstream err_msg;
2817 err_msg <<
"Error: the block number " 2818 << block_vec_number[b]
2819 <<
" appears twice.\n";
2821 OOMPH_CURRENT_FUNCTION,
2822 OOMPH_EXCEPTION_LOCATION);
2829 std::ostringstream err_msg;
2830 err_msg <<
"The distribution of the block vector w must be setup.";
2832 OOMPH_CURRENT_FUNCTION,
2833 OOMPH_EXCEPTION_LOCATION);
2841 para_block_vec_number_size,0);
2843 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2845 para_vec_dist_pt[b] = Block_distribution_pt[block_vec_number[b]];
2855 std::ostringstream err_msg;
2856 err_msg <<
"The distribution of the block vector w does not match \n" 2857 <<
"the concatenation of the block distributions defined in \n" 2858 <<
"block_vec_number.\n";
2860 OOMPH_CURRENT_FUNCTION,
2861 OOMPH_EXCEPTION_LOCATION);
2866 const unsigned n_block = block_vec_number.size();
2876 for (
unsigned b = 0; b < n_block; b++)
2878 const unsigned mapped_b = block_vec_number[b];
2879 most_fine_grain_dof.insert(
2880 most_fine_grain_dof.end(),
2881 Block_to_dof_map_fine[mapped_b].begin(),
2882 Block_to_dof_map_fine[mapped_b].end());
2887 const unsigned ndof = most_fine_grain_dof.size();
2891 for (
unsigned d = 0; d < ndof; d++)
2893 dof_vector[d].build(internal_block_distribution_pt(
2894 most_fine_grain_dof[d]));
2901 internal_return_block_vectors(most_fine_grain_dof,
2925 std::ostringstream err_msg;
2926 err_msg <<
"The distribution of the global vector v must be setup.";
2928 OOMPH_CURRENT_FUNCTION,
2929 OOMPH_EXCEPTION_LOCATION);
2938 std::ostringstream err_msg;
2939 err_msg <<
"The distribution of the global vector v must match the " 2940 <<
" specified master_distribution_pt(). \n" 2941 <<
"i.e. Distribution_pt in the master preconditioner";
2943 OOMPH_CURRENT_FUNCTION,
2944 OOMPH_EXCEPTION_LOCATION);
2949 const unsigned para_nblock_types = nblock_types();
2950 const unsigned para_block_vec_number_size = block_vec_number.size();
2951 if(para_block_vec_number_size > para_nblock_types)
2953 std::ostringstream err_msg;
2954 err_msg <<
"You have requested " << para_block_vec_number_size
2955 <<
" number of blocks, (block_vec_number.size() is " 2956 << para_block_vec_number_size <<
").\n" 2957 <<
"But there are only " << para_nblock_types <<
" nblock_types.\n" 2958 <<
"Please make sure that block_vec_number is correctly sized.\n";
2960 OOMPH_CURRENT_FUNCTION,
2961 OOMPH_EXCEPTION_LOCATION);
2968 for (
unsigned i = 0;
i < para_block_vec_number_size;
i++)
2970 const unsigned para_required_block = block_vec_number[
i];
2971 if(para_required_block > para_nblock_types)
2973 std::ostringstream err_msg;
2974 err_msg <<
"block_vec_number[" <<
i <<
"] is " << para_required_block
2976 <<
"But there are only " << para_nblock_types
2977 <<
" nblock_types.\n";
2979 OOMPH_CURRENT_FUNCTION,
2980 OOMPH_EXCEPTION_LOCATION);
2984 std::set<unsigned> para_set;
2985 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
2987 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
2988 para_set_ret = para_set.insert(block_vec_number[b]);
2990 if(!para_set_ret.second)
2992 std::ostringstream err_msg;
2993 err_msg <<
"Error: the block number " 2994 << block_vec_number[b]
2995 <<
" appears twice.\n";
2997 OOMPH_CURRENT_FUNCTION,
2998 OOMPH_EXCEPTION_LOCATION);
3004 const unsigned n_block = block_vec_number.size();
3015 for (
unsigned b = 0; b < n_block; b++)
3017 const unsigned mapped_b = block_vec_number[b];
3019 most_fine_grain_dof.insert(
3020 most_fine_grain_dof.end(),
3021 Block_to_dof_map_fine[mapped_b].begin(),
3022 Block_to_dof_map_fine[mapped_b].end());
3027 internal_get_block_vectors(most_fine_grain_dof,
3037 unsigned offset = 0;
3039 for (
unsigned b = 0; b < n_block; b++)
3042 const unsigned mapped_b = block_vec_number[b];
3045 const unsigned n_dof = Block_to_dof_map_fine[mapped_b].size();
3050 s[b] = dof_vector[offset];
3055 s[b].build(Block_distribution_pt[mapped_b],0);
3057 for (
unsigned vec_i = 0; vec_i < n_dof; vec_i++)
3059 tmp_vec_pt[vec_i] = &dof_vector[offset + vec_i];
3088 const unsigned n_block = nblock_types();
3092 for (
unsigned i = 0;
i < n_block;
i++)
3094 required_block[
i] =
i;
3098 get_block_vectors(required_block,v,s);
3120 std::ostringstream error_message;
3121 error_message <<
"The distribution of the global vector v must be setup.";
3123 OOMPH_CURRENT_FUNCTION,
3124 OOMPH_EXCEPTION_LOCATION);
3128 std::ostringstream error_message;
3129 error_message <<
"The distribution of the global vector v must match the " 3130 <<
" specified master_distribution_pt(). \n" 3131 <<
"i.e. Distribution_pt in the master preconditioner";
3133 OOMPH_CURRENT_FUNCTION,
3134 OOMPH_EXCEPTION_LOCATION);
3140 const unsigned nblock = block_vec_number.size();
3145 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
3146 !this->distribution_pt()->distributed())
3156 for (
unsigned b = 0; b < nblock; b++)
3158 const unsigned required_block = block_vec_number[b];
3159 s[b].build(Internal_block_distribution_pt[required_block],0.0);
3160 double* s_pt = s[b].values_pt();
3161 unsigned nrow = s[b].nrow();
3162 for (
unsigned i = 0;
i < nrow;
i++)
3164 s_pt[
i] = v_pt[this->Global_index[required_block][
i]];
3171 #ifdef OOMPH_HAS_MPI 3173 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
3176 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
3180 for (
unsigned b = 0; b < nblock; b++)
3182 const unsigned required_block = block_vec_number[b];
3183 s[b].build(Internal_block_distribution_pt[required_block],0.0);
3191 unsigned max_n_send_or_recv = 0;
3192 for (
unsigned p = 0; p < nproc; p++)
3194 for (
unsigned b = 0; b < nblock; b++)
3196 const unsigned required_block = block_vec_number[b];
3197 max_n_send_or_recv =
3198 std::max(max_n_send_or_recv,Nrows_to_send_for_get_block(required_block,p));
3199 max_n_send_or_recv =
3200 std::max(max_n_send_or_recv,Nrows_to_recv_for_get_block(required_block,p));
3201 if (Nrows_to_send_for_get_block(required_block,p) > 0)
3205 if (Nrows_to_recv_for_get_block(required_block,p) > 0)
3214 int* block_lengths =
new int[max_n_send_or_recv];
3215 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
3217 block_lengths[
i] = 1;
3222 for (
unsigned p = 0; p < nproc; p++)
3228 if (nblock_send[p] > 0)
3231 MPI_Datatype block_send_types[nblock_send[p]];
3235 for (
unsigned b = 0; b < nblock; b++)
3237 const unsigned required_block = block_vec_number[b];
3239 if (Nrows_to_send_for_get_block(required_block,p) > 0)
3241 MPI_Type_indexed(Nrows_to_send_for_get_block(required_block,p),block_lengths,
3242 Rows_to_send_for_get_block(required_block,p),MPI_DOUBLE,
3243 &block_send_types[ptr]);
3244 MPI_Type_commit(&block_send_types[ptr]);
3250 MPI_Aint displacements[nblock_send[p]];
3251 int lengths[nblock_send[p]];
3252 for (
int i = 0;
i < nblock_send[p];
i++)
3255 displacements[
i] = 0;
3259 MPI_Datatype type_send;
3260 MPI_Type_create_struct(nblock_send[p],lengths,displacements,
3261 block_send_types,&type_send);
3262 MPI_Type_commit(&type_send);
3265 MPI_Request send_req;
3266 MPI_Isend(const_cast<double*>(v.
values_pt()),1,type_send,p,0,
3267 this->distribution_pt()->communicator_pt()->mpi_comm(),
3269 MPI_Type_free(&type_send);
3270 for (
int i = 0;
i < nblock_send[p];
i++)
3272 MPI_Type_free(&block_send_types[
i]);
3274 requests.push_back(send_req);
3278 if (nblock_recv[p] > 0)
3281 MPI_Datatype block_recv_types[nblock_recv[p]];
3284 MPI_Aint displacements[nblock_recv[p]];
3287 int lengths[nblock_recv[p]];
3290 MPI_Aint displacements_base;
3291 MPI_Get_address(s[0].values_pt(),&displacements_base);
3295 for (
unsigned b = 0; b < nblock; b++)
3297 const unsigned required_block = block_vec_number[b];
3299 if (Nrows_to_recv_for_get_block(required_block,p) > 0)
3301 MPI_Type_indexed(Nrows_to_recv_for_get_block(required_block,p),block_lengths,
3302 Rows_to_recv_for_get_block(required_block,p),MPI_DOUBLE,
3303 &block_recv_types[ptr]);
3304 MPI_Type_commit(&block_recv_types[ptr]);
3305 MPI_Get_address(s[b].values_pt(),&displacements[ptr]);
3306 displacements[ptr] -= displacements_base;
3313 MPI_Datatype type_recv;
3314 MPI_Type_create_struct(nblock_recv[p],lengths,displacements,
3315 block_recv_types,&type_recv);
3316 MPI_Type_commit(&type_recv);
3319 MPI_Request recv_req;
3320 MPI_Irecv(s[0].values_pt(),1,type_recv,p,0,
3321 this->distribution_pt()->communicator_pt()->mpi_comm(),
3323 MPI_Type_free(&type_recv);
3324 for (
int i = 0;
i < nblock_recv[p];
i++)
3326 MPI_Type_free(&block_recv_types[
i]);
3328 requests.push_back(recv_req);
3335 const double* v_values_pt = v.
values_pt();
3336 for (
unsigned b = 0; b < nblock; b++)
3338 const unsigned required_block = block_vec_number[b];
3340 double* w_values_pt = s[b].values_pt();
3341 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(required_block,p);
i++)
3343 w_values_pt[Rows_to_recv_for_get_block(required_block,p)[
i]] =
3344 v_values_pt[Rows_to_send_for_get_block(required_block,p)[
i]];
3351 unsigned c = requests.size();
3355 MPI_Waitall(c,&requests[0],&stat[0]);
3357 delete[] block_lengths;
3361 std::ostringstream error_message;
3362 error_message <<
"The preconditioner is distributed and on more than one " 3363 <<
"processor. MPI is required.";
3365 OOMPH_CURRENT_FUNCTION,
3366 OOMPH_EXCEPTION_LOCATION);
3389 const unsigned nblock = this->internal_nblock_types();
3391 for (
unsigned b = 0; b < nblock; b++)
3393 block_vec_number[b] = b;
3396 internal_get_block_vectors(block_vec_number,v,s);
3416 std::ostringstream err_msg;
3417 err_msg <<
"The distribution of the global vector v must be setup.";
3419 OOMPH_CURRENT_FUNCTION,
3420 OOMPH_EXCEPTION_LOCATION);
3429 std::ostringstream err_msg;
3430 err_msg <<
"The distribution of the global vector v must match the " 3431 <<
" specified master_distribution_pt(). \n" 3432 <<
"i.e. Distribution_pt in the master preconditioner";
3434 OOMPH_CURRENT_FUNCTION,
3435 OOMPH_EXCEPTION_LOCATION);
3440 const unsigned para_block_vec_number_size = block_vec_number.size();
3441 const unsigned para_s_size = s.size();
3442 if(para_block_vec_number_size != para_s_size)
3444 std::ostringstream err_msg;
3445 err_msg <<
"block_vec_number.size() is " << para_block_vec_number_size
3447 <<
"s.size() is " << para_s_size <<
".\n" 3448 <<
"But they must be the same size!\n";
3450 OOMPH_CURRENT_FUNCTION,
3451 OOMPH_EXCEPTION_LOCATION);
3456 const unsigned para_n_block = nblock_types();
3457 if(para_block_vec_number_size > para_n_block)
3459 std::ostringstream err_msg;
3460 err_msg <<
"Trying to return " << para_block_vec_number_size
3461 <<
" block vectors.\n" 3462 <<
"But there are only " << para_n_block <<
" block types.\n";
3464 OOMPH_CURRENT_FUNCTION,
3465 OOMPH_EXCEPTION_LOCATION);
3472 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3474 const unsigned para_required_block = block_vec_number[b];
3475 if(para_required_block > para_n_block)
3477 std::ostringstream err_msg;
3478 err_msg <<
"block_vec_number[" << b <<
"] is " << para_required_block
3480 <<
"But there are only " << para_n_block <<
" block types.\n";
3482 OOMPH_CURRENT_FUNCTION,
3483 OOMPH_EXCEPTION_LOCATION);
3489 std::set<unsigned> para_set;
3490 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3492 std::pair<std::set<unsigned>::iterator,
bool> para_set_ret;
3493 para_set_ret = para_set.insert(block_vec_number[b]);
3495 if(!para_set_ret.second)
3497 std::ostringstream err_msg;
3498 err_msg <<
"Error: the block number " 3499 << block_vec_number[b]
3500 <<
" appears twice.\n";
3502 OOMPH_CURRENT_FUNCTION,
3503 OOMPH_EXCEPTION_LOCATION);
3509 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3513 std::ostringstream err_msg;
3514 err_msg <<
"The distribution of the block vector s[" 3515 << b <<
"] must be setup.\n";
3517 OOMPH_CURRENT_FUNCTION,
3518 OOMPH_EXCEPTION_LOCATION);
3525 for (
unsigned b = 0; b < para_block_vec_number_size; b++)
3527 if (*(s[b].distribution_pt()) !=
3528 *(Block_distribution_pt[block_vec_number[b]]))
3530 std::ostringstream error_message;
3531 error_message <<
"The distribution of the block vector " << b
3532 <<
" must match the" 3533 <<
" specified distribution at " 3534 <<
"Block_distribution_pt[" 3535 << block_vec_number[b] <<
"].\n" 3536 <<
"The distribution of the Block_distribution_pt is determined by\n" 3537 <<
"the vector block_vec_number. Perhaps it is incorrect?\n";
3539 OOMPH_CURRENT_FUNCTION,
3540 OOMPH_EXCEPTION_LOCATION);
3546 const unsigned n_block = block_vec_number.size();
3556 for (
unsigned b = 0; b < n_block; b++)
3558 const unsigned mapped_b = block_vec_number[b];
3560 most_fine_grain_dof.insert(most_fine_grain_dof.end(),
3561 Block_to_dof_map_fine[mapped_b].begin(),
3562 Block_to_dof_map_fine[mapped_b].end());
3568 unsigned offset = 0;
3571 for (
unsigned b = 0; b < n_block; b++)
3574 const unsigned mapped_b = block_vec_number[b];
3577 const unsigned ndof = Block_to_dof_map_fine[mapped_b].size();
3582 dof_vector[offset] = s[b];
3590 for (
unsigned d = 0; d < ndof; d++)
3592 const unsigned offset_plus_d = offset + d;
3595 dof_vector[offset_plus_d].build(
3596 Internal_block_distribution_pt[
3597 most_fine_grain_dof[offset_plus_d]]);
3600 tmp_dof_vector_pt[d] = &dof_vector[offset_plus_d];
3605 s[b],tmp_dof_vector_pt);
3613 internal_return_block_vectors(most_fine_grain_dof,
3634 const unsigned n_block = nblock_types();
3638 for (
unsigned i = 0;
i < n_block;
i++)
3640 required_block[
i] =
i;
3644 return_block_vectors(required_block,s,v);
3664 const unsigned nblock = block_vec_number.size();
3669 std::ostringstream error_message;
3670 error_message <<
"The distribution of the global vector v must be setup.";
3672 OOMPH_CURRENT_FUNCTION,
3673 OOMPH_EXCEPTION_LOCATION);
3677 std::ostringstream error_message;
3678 error_message <<
"The distribution of the global vector v must match the " 3679 <<
" specified master_distribution_pt(). \n" 3680 <<
"i.e. Distribution_pt in the master preconditioner";
3682 OOMPH_CURRENT_FUNCTION,
3683 OOMPH_EXCEPTION_LOCATION);
3685 for (
unsigned b = 0; b < nblock; b++)
3689 std::ostringstream error_message;
3690 error_message <<
"The distribution of the block vector " << b
3691 <<
" must be setup.";
3693 OOMPH_CURRENT_FUNCTION,
3694 OOMPH_EXCEPTION_LOCATION);
3696 const unsigned required_block = block_vec_number[b];
3697 if (*(s[b].distribution_pt()) != *(Internal_block_distribution_pt[required_block]))
3699 std::ostringstream error_message;
3700 error_message <<
"The distribution of the block vector " << b
3701 <<
" must match the" 3702 <<
" specified distribution at Internal_block_distribution_pt[" 3705 OOMPH_CURRENT_FUNCTION,
3706 OOMPH_EXCEPTION_LOCATION);
3714 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
3715 !this->distribution_pt()->distributed())
3718 for (
unsigned b = 0; b < nblock; b++)
3720 const unsigned required_block = block_vec_number[b];
3722 const double* s_pt = s[b].values_pt();
3723 unsigned nrow = this->internal_block_dimension(required_block);
3724 for (
unsigned i = 0;
i < nrow;
i++)
3726 v_pt[this->Global_index[required_block][
i]] = s_pt[
i];
3733 #ifdef OOMPH_HAS_MPI 3736 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
3739 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
3746 unsigned max_n_send_or_recv = 0;
3747 for (
unsigned p = 0; p < nproc; p++)
3749 for (
unsigned b = 0; b < nblock; b++)
3751 const unsigned required_block = block_vec_number[b];
3753 max_n_send_or_recv =
3754 std::max(max_n_send_or_recv,Nrows_to_send_for_get_block(required_block,p));
3755 max_n_send_or_recv =
3756 std::max(max_n_send_or_recv,Nrows_to_recv_for_get_block(required_block,p));
3757 if (Nrows_to_send_for_get_block(required_block,p) > 0)
3761 if (Nrows_to_recv_for_get_block(required_block,p) > 0)
3770 int* block_lengths =
new int[max_n_send_or_recv];
3771 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
3773 block_lengths[
i] = 1;
3778 for (
unsigned p = 0; p < nproc; p++)
3784 if (nblock_recv[p] > 0)
3787 MPI_Datatype block_recv_types[nblock_recv[p]];
3791 for (
unsigned b = 0; b < nblock; b++)
3793 const unsigned required_block = block_vec_number[b];
3795 if (Nrows_to_send_for_get_block(required_block,p) > 0)
3797 MPI_Type_indexed(Nrows_to_send_for_get_block(required_block,p),block_lengths,
3798 Rows_to_send_for_get_block(required_block,p),MPI_DOUBLE,
3799 &block_recv_types[ptr]);
3800 MPI_Type_commit(&block_recv_types[ptr]);
3806 MPI_Aint displacements[nblock_recv[p]];
3807 int lengths[nblock_recv[p]];
3808 for (
int i = 0;
i < nblock_recv[p];
i++)
3811 displacements[
i] = 0;
3815 MPI_Datatype type_recv;
3816 MPI_Type_create_struct(nblock_recv[p],lengths,displacements,
3817 block_recv_types,&type_recv);
3818 MPI_Type_commit(&type_recv);
3821 MPI_Request recv_req;
3822 MPI_Irecv(v.
values_pt(),1,type_recv,p,0,
3823 this->distribution_pt()->communicator_pt()->mpi_comm(),
3825 MPI_Type_free(&type_recv);
3826 for (
int i = 0;
i < nblock_recv[p];
i++)
3828 MPI_Type_free(&block_recv_types[
i]);
3830 requests.push_back(recv_req);
3834 if (nblock_send[p] > 0)
3837 MPI_Datatype block_send_types[nblock_send[p]];
3840 MPI_Aint displacements[nblock_send[p]];
3843 int lengths[nblock_send[p]];
3846 MPI_Aint displacements_base;
3847 MPI_Get_address(const_cast<double*>(s[0].values_pt()),
3848 &displacements_base);
3852 for (
unsigned b = 0; b < nblock; b++)
3854 const unsigned required_block = block_vec_number[b];
3856 if (Nrows_to_recv_for_get_block(required_block,p) > 0)
3858 MPI_Type_indexed(Nrows_to_recv_for_get_block(required_block,p),block_lengths,
3859 Rows_to_recv_for_get_block(required_block,p),MPI_DOUBLE,
3860 &block_send_types[ptr]);
3861 MPI_Type_commit(&block_send_types[ptr]);
3862 MPI_Get_address(const_cast<double*>(s[b].values_pt()),
3863 &displacements[ptr]);
3864 displacements[ptr] -= displacements_base;
3871 MPI_Datatype type_send;
3872 MPI_Type_create_struct(nblock_send[p],lengths,displacements,
3873 block_send_types,&type_send);
3874 MPI_Type_commit(&type_send);
3877 MPI_Request send_req;
3878 MPI_Isend(const_cast<double*>(s[0].values_pt()),1,type_send,p,0,
3879 this->distribution_pt()->communicator_pt()->mpi_comm(),
3881 MPI_Type_free(&type_send);
3882 for (
int i = 0;
i < nblock_send[p];
i++)
3884 MPI_Type_free(&block_send_types[
i]);
3886 requests.push_back(send_req);
3894 for (
unsigned b = 0; b < nblock; b++)
3896 const unsigned required_block = block_vec_number[b];
3898 const double* w_values_pt = s[b].values_pt();
3899 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(required_block,p);
i++)
3901 v_values_pt[Rows_to_send_for_get_block(required_block,p)[
i]] =
3902 w_values_pt[Rows_to_recv_for_get_block(required_block,p)[
i]];
3910 unsigned c = requests.size();
3914 MPI_Waitall(c,&requests[0],&stat[0]);
3916 delete[] block_lengths;
3920 std::ostringstream error_message;
3921 error_message <<
"The preconditioner is distributed and on more than one " 3922 <<
"processor. MPI is required.";
3924 OOMPH_CURRENT_FUNCTION,
3925 OOMPH_EXCEPTION_LOCATION);
3948 const unsigned nblock = this->internal_nblock_types();
3950 for (
unsigned b = 0; b < nblock; b++)
3952 block_vec_number[b] = b;
3955 internal_return_block_vectors(block_vec_number,s,v);
3973 const unsigned n_blocks = this->internal_nblock_types();
3978 std::ostringstream error_message;
3979 error_message <<
"Requested block vector " << b
3980 <<
", however this preconditioner has internal_nblock_types() " 3981 <<
"= " << internal_nblock_types() << std::endl;
3983 OOMPH_CURRENT_FUNCTION,
3984 OOMPH_EXCEPTION_LOCATION);
3988 std::ostringstream error_message;
3989 error_message <<
"The distribution of the global vector v must be setup.";
3991 OOMPH_CURRENT_FUNCTION,
3992 OOMPH_EXCEPTION_LOCATION);
3996 std::ostringstream error_message;
3997 error_message <<
"The distribution of the global vector v must match the " 3998 <<
" specified master_distribution_pt(). \n" 3999 <<
"i.e. Distribution_pt in the master preconditioner";
4001 OOMPH_CURRENT_FUNCTION,
4002 OOMPH_EXCEPTION_LOCATION);
4007 w.
build(Internal_block_distribution_pt[b],0.0);
4012 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4013 !this->distribution_pt()->distributed())
4017 unsigned n_row = w.
nrow();
4018 for (
unsigned i = 0;
i < n_row;
i++)
4020 w_pt[
i] = v_pt[this->Global_index[b][
i]];
4026 #ifdef OOMPH_HAS_MPI 4029 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4032 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4035 unsigned max_n_send_or_recv = 0;
4036 for (
unsigned p = 0; p < nproc; p++)
4038 max_n_send_or_recv =
4039 std::max(max_n_send_or_recv,Nrows_to_send_for_get_block(b,p));
4040 max_n_send_or_recv =
4041 std::max(max_n_send_or_recv,Nrows_to_recv_for_get_block(b,p));
4046 int* block_lengths =
new int[max_n_send_or_recv];
4047 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4049 block_lengths[
i] = 1;
4054 for (
unsigned p = 0; p < nproc; p++)
4059 if (Nrows_to_send_for_get_block(b,p) > 0)
4062 MPI_Datatype type_send;
4063 MPI_Type_indexed(Nrows_to_send_for_get_block(b,p),block_lengths,
4064 Rows_to_send_for_get_block(b,p),MPI_DOUBLE,
4066 MPI_Type_commit(&type_send);
4069 MPI_Request send_req;
4070 MPI_Isend(const_cast<double*>(v.
values_pt()),1,type_send,p,0,
4071 this->distribution_pt()->communicator_pt()->mpi_comm(),
4073 MPI_Type_free(&type_send);
4074 requests.push_back(send_req);
4077 if (Nrows_to_recv_for_get_block(b,p) > 0)
4080 MPI_Datatype type_recv;
4081 MPI_Type_indexed(Nrows_to_recv_for_get_block(b,p),block_lengths,
4082 Rows_to_recv_for_get_block(b,p),MPI_DOUBLE,
4084 MPI_Type_commit(&type_recv);
4087 MPI_Request recv_req;
4088 MPI_Irecv(w.
values_pt(),1,type_recv,p,0,
4089 this->distribution_pt()->communicator_pt()->mpi_comm(),
4091 MPI_Type_free(&type_recv);
4092 requests.push_back(recv_req);
4100 const double* v_values_pt = v.
values_pt();
4101 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(b,p);
i++)
4103 w_values_pt[Rows_to_recv_for_get_block(b,p)[
i]] =
4104 v_values_pt[Rows_to_send_for_get_block(b,p)[
i]];
4110 unsigned c = requests.size();
4114 MPI_Waitall(c,&requests[0],&stat[0]);
4116 delete[] block_lengths;
4120 std::ostringstream error_message;
4121 error_message <<
"The preconditioner is distributed and on more than one " 4122 <<
"processor. MPI is required.";
4124 OOMPH_CURRENT_FUNCTION,
4125 OOMPH_EXCEPTION_LOCATION);
4141 const unsigned para_n_blocks = nblock_types();
4144 if (b >= para_n_blocks)
4146 std::ostringstream err_msg;
4147 err_msg <<
"Requested block vector " << b
4148 <<
", however this preconditioner has only " 4149 << para_n_blocks <<
" block types" <<
".\n";
4151 OOMPH_CURRENT_FUNCTION,
4152 OOMPH_EXCEPTION_LOCATION);
4157 std::ostringstream err_msg;
4158 err_msg <<
"The distribution of the global vector v must be setup.";
4160 OOMPH_CURRENT_FUNCTION,
4161 OOMPH_EXCEPTION_LOCATION);
4165 std::ostringstream err_msg;
4166 err_msg <<
"The distribution of the global vector v must match the " 4167 <<
" specified master_distribution_pt(). \n" 4168 <<
"i.e. Distribution_pt in the master preconditioner";
4170 OOMPH_CURRENT_FUNCTION,
4171 OOMPH_EXCEPTION_LOCATION);
4207 const unsigned n_dof_vec = most_fine_grain_dof.size();
4212 internal_get_block_vector(most_fine_grain_dof[0],v,w);
4220 internal_get_block_vectors(most_fine_grain_dof,
4223 w.
build(Block_distribution_pt[b],0);
4252 const unsigned n_blocks = this->internal_nblock_types();
4257 std::ostringstream error_message;
4258 error_message <<
"Requested block vector " << b
4259 <<
", however this preconditioner has internal_nblock_types() " 4260 <<
"= " << internal_nblock_types() << std::endl;
4262 OOMPH_CURRENT_FUNCTION,
4263 OOMPH_EXCEPTION_LOCATION);
4267 std::ostringstream error_message;
4268 error_message <<
"The distribution of the global vector v must be setup.";
4270 OOMPH_CURRENT_FUNCTION,
4271 OOMPH_EXCEPTION_LOCATION);
4275 std::ostringstream error_message;
4276 error_message <<
"The distribution of the global vector v must match the " 4277 <<
" specified master_distribution_pt(). \n" 4278 <<
"i.e. Distribution_pt in the master preconditioner";
4280 OOMPH_CURRENT_FUNCTION,
4281 OOMPH_EXCEPTION_LOCATION);
4285 std::ostringstream error_message;
4286 error_message <<
"The distribution of the block vector w must be setup.";
4288 OOMPH_CURRENT_FUNCTION,
4289 OOMPH_EXCEPTION_LOCATION);
4293 std::ostringstream error_message;
4294 error_message <<
"The distribution of the block vector w must match the " 4295 <<
" specified distribution at Internal_block_distribution_pt[b]";
4297 OOMPH_CURRENT_FUNCTION,
4298 OOMPH_EXCEPTION_LOCATION);
4305 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4306 !this->distribution_pt()->distributed())
4310 unsigned n_row = this->internal_block_dimension(b);
4315 for (
unsigned i = 0;
i < n_row;
i++)
4317 v_pt[this->Global_index[b][
i]] = w_pt[
i];
4323 #ifdef OOMPH_HAS_MPI 4326 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4329 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4332 unsigned max_n_send_or_recv = 0;
4333 for (
unsigned p = 0; p < nproc; p++)
4335 max_n_send_or_recv =
4336 std::max(max_n_send_or_recv,Nrows_to_send_for_get_block(b,p));
4337 max_n_send_or_recv =
4338 std::max(max_n_send_or_recv,Nrows_to_recv_for_get_block(b,p));
4343 int* block_lengths =
new int[max_n_send_or_recv];
4344 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4346 block_lengths[
i] = 1;
4351 for (
unsigned p = 0; p < nproc; p++)
4356 if (Nrows_to_recv_for_get_block(b,p) > 0)
4359 MPI_Datatype type_send;
4360 MPI_Type_indexed(Nrows_to_recv_for_get_block(b,p),block_lengths,
4361 Rows_to_recv_for_get_block(b,p),MPI_DOUBLE,
4363 MPI_Type_commit(&type_send);
4366 MPI_Request send_req;
4367 MPI_Isend(const_cast<double*>(w.
values_pt()),1,type_send,p,0,
4368 this->distribution_pt()->communicator_pt()->mpi_comm(),
4370 MPI_Type_free(&type_send);
4371 requests.push_back(send_req);
4374 if (Nrows_to_send_for_get_block(b,p) > 0)
4377 MPI_Datatype type_recv;
4378 MPI_Type_indexed(Nrows_to_send_for_get_block(b,p),block_lengths,
4379 Rows_to_send_for_get_block(b,p),MPI_DOUBLE,
4381 MPI_Type_commit(&type_recv);
4384 MPI_Request recv_req;
4385 MPI_Irecv(v.
values_pt(),1,type_recv,p,0,
4386 this->distribution_pt()->communicator_pt()->mpi_comm(),
4388 MPI_Type_free(&type_recv);
4389 requests.push_back(recv_req);
4396 const double* w_values_pt = w.
values_pt();
4398 for (
unsigned i = 0;
i < Nrows_to_send_for_get_block(b,p);
i++)
4400 v_values_pt[Rows_to_send_for_get_block(b,p)[
i]] =
4401 w_values_pt[Rows_to_recv_for_get_block(b,p)[
i]];
4407 unsigned c = requests.size();
4411 MPI_Waitall(c,&requests[0],&stat[0]);
4413 delete[] block_lengths;
4417 std::ostringstream error_message;
4418 error_message <<
"The preconditioner is distributed and on more than one " 4419 <<
"processor. MPI is required.";
4421 OOMPH_CURRENT_FUNCTION,
4422 OOMPH_EXCEPTION_LOCATION);
4441 const unsigned para_n_blocks = nblock_types();
4444 if (n >= para_n_blocks)
4446 std::ostringstream err_msg;
4447 err_msg <<
"Requested block vector " << b
4448 <<
", however this preconditioner has " << para_n_blocks
4449 <<
" block types.\n";
4451 OOMPH_CURRENT_FUNCTION,
4452 OOMPH_EXCEPTION_LOCATION);
4456 std::ostringstream err_msg;
4457 err_msg <<
"The distribution of the global vector v must be setup.";
4459 OOMPH_CURRENT_FUNCTION,
4460 OOMPH_EXCEPTION_LOCATION);
4464 std::ostringstream err_msg;
4465 err_msg <<
"The distribution of the global vector v must match the " 4466 <<
" specified master_distribution_pt(). \n" 4467 <<
"i.e. Distribution_pt in the master preconditioner";
4469 OOMPH_CURRENT_FUNCTION,
4470 OOMPH_EXCEPTION_LOCATION);
4474 std::ostringstream err_msg;
4475 err_msg <<
"The distribution of the block vector b must be setup.";
4477 OOMPH_CURRENT_FUNCTION,
4478 OOMPH_EXCEPTION_LOCATION);
4487 const unsigned n_dof_vec = Block_to_dof_map_fine[n].size();
4492 internal_return_block_vector(most_fine_grain_dof[0],b,v);
4498 for (
unsigned d = 0; d < n_dof_vec; d++)
4500 dof_vector[d].build(internal_block_distribution_pt(
4501 most_fine_grain_dof[d]));
4507 internal_return_block_vectors(most_fine_grain_dof,
4556 std::ostringstream error_message;
4557 error_message <<
"The distribution of the global vector v must be setup.";
4559 OOMPH_CURRENT_FUNCTION,
4560 OOMPH_EXCEPTION_LOCATION);
4564 std::ostringstream error_message;
4565 error_message <<
"The distribution of the global vector v must match the " 4566 <<
" specified master_distribution_pt(). \n" 4567 <<
"i.e. Distribution_pt in the master preconditioner";
4569 OOMPH_CURRENT_FUNCTION,
4570 OOMPH_EXCEPTION_LOCATION);
4575 w.
build(this->internal_preconditioner_matrix_distribution_pt(),0.0);
4580 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4581 !this->distribution_pt()->distributed())
4585 unsigned nblock = this->Internal_nblock_types;
4588 unsigned block_offset = 0;
4591 for (
unsigned b = 0; b < nblock;b++)
4593 unsigned block_nrow = this->internal_block_dimension(b);
4594 for (
unsigned i = 0;
i < block_nrow;
i++)
4596 w_pt[block_offset+
i] = v_pt[this->Global_index[b][
i]];
4598 block_offset += block_nrow;
4604 #ifdef OOMPH_HAS_MPI 4607 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4610 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4613 unsigned max_n_send_or_recv = 0;
4614 for (
unsigned p = 0; p < nproc; p++)
4616 max_n_send_or_recv =
4617 std::max(max_n_send_or_recv,Nrows_to_send_for_get_ordered[p]);
4618 max_n_send_or_recv =
4619 std::max(max_n_send_or_recv,Nrows_to_recv_for_get_ordered[p]);
4624 int* block_lengths =
new int[max_n_send_or_recv];
4625 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4627 block_lengths[
i] = 1;
4632 for (
unsigned p = 0; p < nproc; p++)
4637 if (Nrows_to_send_for_get_ordered[p] > 0)
4640 MPI_Datatype type_send;
4641 MPI_Type_indexed(Nrows_to_send_for_get_ordered[p],block_lengths,
4642 Rows_to_send_for_get_ordered[p],MPI_DOUBLE,
4644 MPI_Type_commit(&type_send);
4647 MPI_Request send_req;
4648 MPI_Isend(const_cast<double*>(v.
values_pt()),1,type_send,p,0,
4649 this->distribution_pt()->communicator_pt()->mpi_comm(),
4651 MPI_Type_free(&type_send);
4652 requests.push_back(send_req);
4655 if (Nrows_to_recv_for_get_ordered[p] > 0)
4658 MPI_Datatype type_recv;
4659 MPI_Type_indexed(Nrows_to_recv_for_get_ordered[p],block_lengths,
4660 Rows_to_recv_for_get_ordered[p],MPI_DOUBLE,
4662 MPI_Type_commit(&type_recv);
4665 MPI_Request recv_req;
4666 MPI_Irecv(w.
values_pt(),1,type_recv,p,0,
4667 this->distribution_pt()->communicator_pt()->mpi_comm(),
4669 MPI_Type_free(&type_recv);
4670 requests.push_back(recv_req);
4678 const double* v_values_pt = v.
values_pt();
4679 for (
unsigned i = 0;
i < Nrows_to_send_for_get_ordered[p];
i++)
4681 w_values_pt[Rows_to_recv_for_get_ordered[p][
i]] =
4682 v_values_pt[Rows_to_send_for_get_ordered[p][
i]];
4688 unsigned c = requests.size();
4692 MPI_Waitall(c,&requests[0],&stat[0]);
4694 delete[] block_lengths;
4698 std::ostringstream error_message;
4699 error_message <<
"The preconditioner is distributed and on more than one " 4700 <<
"processor. MPI is required.";
4702 OOMPH_CURRENT_FUNCTION,
4703 OOMPH_EXCEPTION_LOCATION);
4742 std::ostringstream error_message;
4743 error_message <<
"The distribution of the global vector v must be setup.";
4745 OOMPH_CURRENT_FUNCTION,
4746 OOMPH_EXCEPTION_LOCATION);
4750 std::ostringstream error_message;
4751 error_message <<
"The distribution of the global vector v must match the " 4752 <<
" specified master_distribution_pt(). \n" 4753 <<
"i.e. Distribution_pt in the master preconditioner";
4755 OOMPH_CURRENT_FUNCTION,
4756 OOMPH_EXCEPTION_LOCATION);
4761 unsigned nblocks = this->nblock_types();
4765 for (
unsigned b = 0; b < nblocks; b++)
4767 block_vec_number[b]=b;
4771 get_concatenated_block_vector(block_vec_number,v,w);
4800 std::ostringstream error_message;
4801 error_message <<
"The distribution of the global vector v must be setup.";
4803 OOMPH_CURRENT_FUNCTION,
4804 OOMPH_EXCEPTION_LOCATION);
4808 std::ostringstream error_message;
4809 error_message <<
"The distribution of the global vector v must match the " 4810 <<
" specified master_distribution_pt(). \n" 4811 <<
"i.e. Distribution_pt in the master preconditioner";
4813 OOMPH_CURRENT_FUNCTION,
4814 OOMPH_EXCEPTION_LOCATION);
4818 std::ostringstream error_message;
4819 error_message <<
"The distribution of the block vector w must be setup.";
4821 OOMPH_CURRENT_FUNCTION,
4822 OOMPH_EXCEPTION_LOCATION);
4824 if (*w.
distribution_pt() != *this->internal_preconditioner_matrix_distribution_pt())
4826 std::ostringstream error_message;
4827 error_message <<
"The distribution of the block vector w must match the " 4828 <<
" specified distribution at Distribution_pt[b]";
4830 OOMPH_CURRENT_FUNCTION,
4831 OOMPH_EXCEPTION_LOCATION);
4839 if (this->distribution_pt()->communicator_pt()->nproc() == 1 ||
4840 !this->distribution_pt()->distributed())
4843 unsigned nblock = this->Internal_nblock_types;
4846 unsigned block_offset = 0;
4849 for (
unsigned b = 0; b < nblock;b++)
4851 unsigned block_nrow = this->internal_block_dimension(b);
4852 for (
unsigned i = 0;
i < block_nrow;
i++)
4854 v_pt[this->Global_index[b][
i]] = w_pt[block_offset+
i];
4856 block_offset += block_nrow;
4862 #ifdef OOMPH_HAS_MPI 4865 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
4868 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
4871 unsigned max_n_send_or_recv = 0;
4872 for (
unsigned p = 0; p < nproc; p++)
4874 max_n_send_or_recv =
4875 std::max(max_n_send_or_recv,Nrows_to_send_for_get_ordered[p]);
4876 max_n_send_or_recv =
4877 std::max(max_n_send_or_recv,Nrows_to_recv_for_get_ordered[p]);
4882 int* block_lengths =
new int[max_n_send_or_recv];
4883 for (
unsigned i = 0;
i < max_n_send_or_recv;
i++)
4885 block_lengths[
i] = 1;
4890 for (
unsigned p = 0; p < nproc; p++)
4895 if (Nrows_to_recv_for_get_ordered[p] > 0)
4898 MPI_Datatype type_send;
4899 MPI_Type_indexed(Nrows_to_recv_for_get_ordered[p],block_lengths,
4900 Rows_to_recv_for_get_ordered[p],MPI_DOUBLE,
4902 MPI_Type_commit(&type_send);
4905 MPI_Request send_req;
4906 MPI_Isend(const_cast<double*>(w.
values_pt()),1,type_send,p,0,
4907 this->distribution_pt()->communicator_pt()->mpi_comm(),
4909 MPI_Type_free(&type_send);
4910 requests.push_back(send_req);
4913 if (Nrows_to_send_for_get_ordered[p] > 0)
4916 MPI_Datatype type_recv;
4917 MPI_Type_indexed(Nrows_to_send_for_get_ordered[p],block_lengths,
4918 Rows_to_send_for_get_ordered[p],MPI_DOUBLE,
4920 MPI_Type_commit(&type_recv);
4923 MPI_Request recv_req;
4924 MPI_Irecv(v.
values_pt(),1,type_recv,p,0,
4925 this->distribution_pt()->communicator_pt()->mpi_comm(),&recv_req);
4926 MPI_Type_free(&type_recv);
4927 requests.push_back(recv_req);
4934 const double* w_values_pt = w.
values_pt();
4936 for (
unsigned i = 0;
i < Nrows_to_send_for_get_ordered[p];
i++)
4938 v_values_pt[Rows_to_send_for_get_ordered[p][
i]] =
4939 w_values_pt[Rows_to_recv_for_get_ordered[p][
i]];
4945 unsigned c = requests.size();
4949 MPI_Waitall(c,&requests[0],&stat[0]);
4951 delete[] block_lengths;
4955 std::ostringstream error_message;
4956 error_message <<
"The preconditioner is distributed and on more than one " 4957 <<
"processor. MPI is required.";
4959 OOMPH_CURRENT_FUNCTION,
4960 OOMPH_EXCEPTION_LOCATION);
4987 std::ostringstream error_message;
4988 error_message <<
"The distribution of the global vector v must be setup.";
4990 OOMPH_CURRENT_FUNCTION,
4991 OOMPH_EXCEPTION_LOCATION);
4995 std::ostringstream error_message;
4996 error_message <<
"The distribution of the global vector v must match the " 4997 <<
" specified master_distribution_pt(). \n" 4998 <<
"i.e. Distribution_pt in the master preconditioner";
5000 OOMPH_CURRENT_FUNCTION,
5001 OOMPH_EXCEPTION_LOCATION);
5005 std::ostringstream error_message;
5006 error_message <<
"The distribution of the block vector w must be setup.";
5008 OOMPH_CURRENT_FUNCTION,
5009 OOMPH_EXCEPTION_LOCATION);
5011 if (*w.
distribution_pt() != *this->preconditioner_matrix_distribution_pt())
5013 std::ostringstream error_message;
5014 error_message <<
"The distribution of the block vector w must match the " 5015 <<
"concatenations of distributions in " 5016 <<
"Block_distribution_pt.\n";
5018 OOMPH_CURRENT_FUNCTION,
5019 OOMPH_EXCEPTION_LOCATION);
5024 const unsigned nblocks = nblock_types();
5026 for (
unsigned b = 0; b < nblocks; b++)
5028 block_vec_number[b] = b;
5031 return_concatenated_block_vector(block_vec_number,w,v);
5047 const unsigned n_blocks = this->internal_nblock_types();
5050 if (block_i >= n_blocks || block_j >= n_blocks)
5052 std::ostringstream error_message;
5053 error_message <<
"Requested block (" << block_i <<
"," << block_j
5054 <<
"), however this preconditioner has internal_nblock_types() " 5055 <<
"= " << internal_nblock_types() << std::endl;
5057 OOMPH_CURRENT_FUNCTION,
5058 OOMPH_EXCEPTION_LOCATION);
5062 if(is_subsidiary_block_preconditioner())
5064 if(master_block_preconditioner_pt()->matrix_pt() != matrix_pt())
5066 std::string err =
"Master and subs should have same matrix.";
5068 OOMPH_EXCEPTION_LOCATION);
5084 int* j_column_index;
5088 j_row_start = cr_matrix_pt->
row_start();
5090 j_value = cr_matrix_pt->
value();
5093 unsigned block_nrow = this->internal_block_dimension(block_i);
5094 unsigned block_ncol = this->internal_block_dimension(block_j);
5100 int* temp_row_start =
new int[block_nrow+1];
5101 for (
unsigned i = 0;
i <= block_nrow;
i++)
5103 temp_row_start[
i] = 0;
5109 unsigned master_nrow = this->master_nrow();
5114 for (
unsigned k = 0; k < master_nrow; k++)
5116 if (internal_block_number(k) == static_cast<int>(block_i))
5118 for (
int l = j_row_start[k];
5119 l < j_row_start[k+1]; l++)
5121 if (internal_block_number(j_column_index[l]) ==
5122 static_cast<int>(block_j))
5125 temp_ptr[internal_index_in_block(k)+1]++;
5132 int* temp_column_index =
new int[block_nnz];
5133 double* temp_value =
new double[block_nnz];
5139 temp_row_start[0] = 0;
5140 for (
unsigned k = 1; k <= block_nrow; k++)
5142 temp_row_start[k] = temp_row_start[k-1]+temp_ptr[k];
5143 temp_ptr[k] = temp_row_start[k];
5148 for (
unsigned k = 0; k < master_nrow; k++)
5150 if (internal_block_number(k) == static_cast<int>(block_i))
5152 for (
int l = j_row_start[k];
5153 l < j_row_start[k+1]; l++)
5155 if (internal_block_number(j_column_index[l]) ==
5156 static_cast<int>(block_j))
5158 int kk = temp_ptr[internal_index_in_block(k)]++;
5159 temp_value[kk] = j_value[l];
5160 temp_column_index[kk] =
5161 internal_index_in_block(j_column_index[l]);
5172 output_block.
build(Internal_block_distribution_pt[block_i]);
5174 temp_value,temp_column_index,
5180 if (Run_block_matrix_test)
5183 block_matrix_test(block_i, block_j, &output_block);
5192 #ifdef OOMPH_HAS_MPI 5194 unsigned nproc = this->distribution_pt()->communicator_pt()->nproc();
5197 unsigned my_rank = this->distribution_pt()->communicator_pt()->my_rank();
5200 int* j_row_start = cr_matrix_pt->
row_start();
5202 double* j_value = cr_matrix_pt->
value();
5218 unsigned nrow_local = Internal_block_distribution_pt[block_i]->nrow_local();
5224 for (
unsigned p = 0; p < nproc; p++)
5226 int nrow_send = Nrows_to_send_for_get_block(block_i,p);
5227 int nrow_recv = Nrows_to_recv_for_get_block(block_i,p);
5230 nnz_recv[p] =
new int[nrow_recv];
5233 if (nrow_send > 0 && p != my_rank)
5235 nnz_send[p] =
new int[nrow_send];
5240 for (
int i = 0;
i < nrow_send;
i++)
5242 unsigned row = Rows_to_send_for_get_block(block_i,p)[
i];
5244 for (
int r = j_row_start[row]; r < j_row_start[row+1]; r++)
5246 if (internal_block_number(j_column_index[r]) ==
int(block_j))
5259 total_nnz_send[p] += c;
5268 MPI_Isend(nnz_send[p],nrow_send,MPI_INT,p,0,
5269 this->distribution_pt()->communicator_pt()->mpi_comm(),
5271 send_req.push_back(req);
5278 MPI_Irecv(nnz_recv[p],nrow_recv,MPI_INT,p,0,
5279 this->distribution_pt()->communicator_pt()->mpi_comm(),
5281 recv1_req.push_back(req);
5288 for (
unsigned p = 0; p < nproc; p++)
5290 int nrow_send = Nrows_to_send_for_get_block(block_i,p);
5295 if (total_nnz_send[p] > 0)
5297 values_for_proc[p] =
new double[total_nnz_send[p]];
5298 column_index_for_proc[p] =
new int[total_nnz_send[p]];
5302 for (
int i = 0;
i < nrow_send;
i++)
5304 unsigned row = Rows_to_send_for_get_block(block_i,p)[
i];
5305 for (
int r = j_row_start[row]; r < j_row_start[row+1]; r++)
5307 if (internal_block_number(j_column_index[r]) ==
int(block_j))
5309 values_for_proc[p][ptr] = j_value[r];
5310 column_index_for_proc[p][ptr] =
5311 internal_index_in_block(j_column_index[r]);
5318 MPI_Datatype types[2];
5319 MPI_Type_contiguous(total_nnz_send[p],MPI_DOUBLE,&types[0]);
5320 MPI_Type_commit(&types[0]);
5321 MPI_Type_contiguous(total_nnz_send[p],MPI_INT,&types[1]);
5322 MPI_Type_commit(&types[1]);
5325 MPI_Aint displacement[2];
5326 MPI_Get_address(values_for_proc[p],&displacement[0]);
5327 MPI_Get_address(column_index_for_proc[p],&displacement[1]);
5330 displacement[1] -= displacement[0];
5331 displacement[0] -= displacement[0];
5335 length[0] = length[1] = 1;
5338 MPI_Datatype final_type;
5339 MPI_Type_create_struct(2,length,displacement,types,&final_type);
5340 MPI_Type_commit(&final_type);
5341 MPI_Type_free(&types[0]);
5342 MPI_Type_free(&types[1]);
5346 MPI_Isend(values_for_proc[p],1,final_type,p,1,
5347 this->distribution_pt()->communicator_pt()->mpi_comm(),
5349 send_req.push_back(req);
5350 MPI_Type_free(&final_type);
5357 int c_recv = recv1_req.size();
5360 MPI_Waitall(c_recv,&recv1_req[0],MPI_STATUS_IGNORE);
5365 int local_block_nnz = 0;
5366 for (
unsigned p = 0; p < nproc; p++)
5369 for (
unsigned i = 0;
i < Nrows_to_recv_for_get_block(block_i,p);
i++)
5371 total_nnz_recv_from_proc[p] += nnz_recv[p][
i];
5374 local_block_nnz += total_nnz_recv_from_proc[p];
5382 for (
unsigned p = 0; p < nproc; p++)
5384 if (Nrows_to_recv_for_get_block(block_i,p) > 0)
5386 n_recv_block[p] = 1;
5388 for (
unsigned i = 1;
i < Nrows_to_recv_for_get_block(block_i,p);
i++)
5390 if (Rows_to_recv_for_get_block(block_i,p)[
i] !=
5391 Rows_to_recv_for_get_block(block_i,p)[
i-1] + 1)
5399 int* row_start_recv =
new int[nrow_local+1];
5400 for (
unsigned i = 0;
i <= nrow_local;
i++)
5402 row_start_recv[
i] = 0;
5404 for (
unsigned p = 0; p < nproc; p++)
5406 for (
unsigned i = 0;
i < Nrows_to_recv_for_get_block(block_i,p);
i++)
5408 row_start_recv[Rows_to_recv_for_get_block(block_i,p)[
i]]
5412 int g = row_start_recv[0];
5413 row_start_recv[0] = 0;
5414 for (
unsigned i = 1;
i < nrow_local;
i++)
5417 g = row_start_recv[
i];
5418 row_start_recv[
i] = row_start_recv[
i-1] + temp_g;
5420 row_start_recv[nrow_local] = row_start_recv[nrow_local-1] + g;
5425 for (
unsigned p = 0; p < nproc; p++)
5427 if (Nrows_to_recv_for_get_block(block_i,p) > 0)
5429 offset_recv_block[p] =
new int[n_recv_block[p]];
5430 offset_recv_block[p][0] = 0;
5431 nnz_recv_block[p] =
new int[n_recv_block[p]];
5432 for (
int i = 0;
i < n_recv_block[p];
i++)
5434 nnz_recv_block[p][
i] = 0;
5437 nnz_recv_block[p][ptr] += nnz_recv[p][0];
5438 offset_recv_block[p][0]
5439 = row_start_recv[Rows_to_recv_for_get_block(block_i,p)[0]];
5440 for (
unsigned i = 1;
i < Nrows_to_recv_for_get_block(block_i,p);
i++)
5442 if (Rows_to_recv_for_get_block(block_i,p)[
i] !=
5443 Rows_to_recv_for_get_block(block_i,p)[
i-1] + 1)
5446 offset_recv_block[p][ptr]
5447 = row_start_recv[Rows_to_recv_for_get_block(block_i,p)[
i]];
5449 nnz_recv_block[p][ptr] += nnz_recv[p][
i];
5452 delete[] nnz_recv[p];
5456 int* column_index_recv =
new int[local_block_nnz];
5457 double* values_recv =
new double[local_block_nnz];
5459 for (
unsigned p = 0; p < nproc; p++)
5463 if (total_nnz_recv_from_proc[p] != 0)
5466 MPI_Datatype types[2];
5467 MPI_Type_indexed(n_recv_block[p],nnz_recv_block[p],
5468 offset_recv_block[p],MPI_DOUBLE,&types[0]);
5469 MPI_Type_commit(&types[0]);
5470 MPI_Type_indexed(n_recv_block[p],nnz_recv_block[p],
5471 offset_recv_block[p],MPI_INT,&types[1]);
5472 MPI_Type_commit(&types[1]);
5475 MPI_Aint displacements[2];
5476 MPI_Get_address(values_recv,&displacements[0]);
5477 MPI_Get_address(column_index_recv,&displacements[1]);
5478 displacements[1] -= displacements[0];
5479 displacements[0] -= displacements[0];
5483 length[0] = length[1] = 1;
5486 MPI_Datatype final_type;
5487 MPI_Type_create_struct(2,length,displacements,types,&final_type);
5488 MPI_Type_commit(&final_type);
5489 MPI_Type_free(&types[0]);
5490 MPI_Type_free(&types[1]);
5494 MPI_Irecv(values_recv,1,final_type,p,1,
5495 this->distribution_pt()->communicator_pt()->mpi_comm(),
5497 recv2_req.push_back(req);
5498 MPI_Type_free(&final_type);
5504 unsigned block_ptr = 0;
5505 unsigned counter = 0;
5506 int nrow_send = Nrows_to_send_for_get_block(block_i,my_rank);
5509 unsigned offset = offset_recv_block[my_rank][0];
5510 for (
int i = 0;
i < nrow_send;
i++)
5514 if (Rows_to_recv_for_get_block(block_i,p)[
i] !=
5515 Rows_to_recv_for_get_block(block_i,p)[
i-1] + 1)
5519 offset = offset_recv_block[my_rank][block_ptr];
5522 unsigned row = Rows_to_send_for_get_block(block_i,my_rank)[
i];
5523 for (
int r = j_row_start[row]; r < j_row_start[row+1]; r++)
5525 if (internal_block_number(j_column_index[r]) ==
int(block_j))
5527 values_recv[offset+counter] = j_value[r];
5528 column_index_recv[offset + counter] =
5529 internal_index_in_block(j_column_index[r]);
5539 c_recv = recv2_req.size();
5542 MPI_Waitall(c_recv,&recv2_req[0],MPI_STATUS_IGNORE);
5546 output_block.
build(Internal_block_distribution_pt[block_i]);
5554 int c_send = send_req.size();
5557 MPI_Waitall(c_send,&send_req[0],MPI_STATUS_IGNORE);
5561 for (
unsigned p = 0; p < nproc; p++)
5563 delete[] nnz_send[p];
5564 delete[] column_index_for_proc[p];
5565 delete[] values_for_proc[p];
5566 delete[] offset_recv_block[p];
5567 delete[] nnz_recv_block[p];
5571 std::ostringstream error_message;
5572 error_message <<
"The matrix is distributed and on more than one " 5573 <<
"processor. MPI is required.";
5575 OOMPH_CURRENT_FUNCTION,
5576 OOMPH_EXCEPTION_LOCATION);
5595 const bool& ignore_replacement_block)
const 5599 unsigned para_ndofs = ndof_types();
5602 if (block_i >= para_ndofs || block_j >= para_ndofs)
5604 std::ostringstream err_msg;
5605 err_msg <<
"Requested dof block (" << block_i <<
"," << block_j
5606 <<
"), however this preconditioner has ndof_types() " 5607 <<
"= " << para_ndofs << std::endl;
5609 OOMPH_CURRENT_FUNCTION,
5610 OOMPH_EXCEPTION_LOCATION);
5614 CRDoubleMatrix * tmp_block_pt = Replacement_dof_block_pt.get(block_i,block_j);
5616 if((tmp_block_pt == 0) || ignore_replacement_block)
5620 const unsigned ndof_in_parent_i = Doftype_coarsen_map_coarse[block_i].size();
5621 const unsigned ndof_in_parent_j = Doftype_coarsen_map_coarse[block_j].size();
5623 if(ndof_in_parent_i == 1 && ndof_in_parent_j == 1)
5625 unsigned parent_dof_i = Doftype_coarsen_map_coarse[block_i][0];
5626 unsigned parent_dof_j = Doftype_coarsen_map_coarse[block_j][0];
5628 if(is_master_block_preconditioner())
5630 internal_get_block(parent_dof_i,parent_dof_j,output_block);
5634 parent_dof_i = Doftype_in_master_preconditioner_coarse[parent_dof_i];
5635 parent_dof_j = Doftype_in_master_preconditioner_coarse[parent_dof_j];
5637 master_block_preconditioner_pt()->get_dof_level_block(parent_dof_i,
5640 ignore_replacement_block);
5650 for (
unsigned dof_i = 0; dof_i < ndof_in_parent_i; dof_i++)
5652 unsigned parent_dof_i = Doftype_coarsen_map_coarse[block_i][dof_i];
5653 if(is_subsidiary_block_preconditioner())
5655 parent_dof_i = Doftype_in_master_preconditioner_coarse[parent_dof_i];
5658 for (
unsigned dof_j = 0; dof_j < ndof_in_parent_j; dof_j++)
5660 unsigned parent_dof_j = Doftype_coarsen_map_coarse[block_j][dof_j];
5664 new_block[dof_i][dof_j] = 1;
5666 if(is_master_block_preconditioner())
5668 internal_get_block(parent_dof_i,parent_dof_j,*tmp_blocks_pt(dof_i,dof_j));
5672 parent_dof_j = Doftype_in_master_preconditioner_coarse[parent_dof_j];
5674 master_block_preconditioner_pt()
5675 ->get_dof_level_block(parent_dof_i,
5677 *tmp_blocks_pt(dof_i,dof_j),
5678 ignore_replacement_block);
5685 for (
unsigned parent_dof_i = 0; parent_dof_i < ndof_in_parent_i; parent_dof_i++)
5687 unsigned mapped_dof_i = Doftype_coarsen_map_coarse[block_i][parent_dof_i];
5689 if(is_master_block_preconditioner())
5691 tmp_row_dist_pt[parent_dof_i] = Internal_block_distribution_pt[mapped_dof_i];
5695 mapped_dof_i = Doftype_in_master_preconditioner_coarse[mapped_dof_i];
5697 tmp_row_dist_pt[parent_dof_i]
5698 = master_block_preconditioner_pt()
5699 ->dof_block_distribution_pt(mapped_dof_i);
5706 for (
unsigned parent_dof_j = 0; parent_dof_j < ndof_in_parent_j; parent_dof_j++)
5708 unsigned mapped_dof_j = Doftype_coarsen_map_coarse[block_j][parent_dof_j];
5710 if(is_master_block_preconditioner())
5712 tmp_col_dist_pt[parent_dof_j] = Internal_block_distribution_pt[mapped_dof_j];
5716 mapped_dof_j = Doftype_in_master_preconditioner_coarse[mapped_dof_j];
5717 tmp_col_dist_pt[parent_dof_j]
5718 = master_block_preconditioner_pt()
5719 ->dof_block_distribution_pt(mapped_dof_j);
5729 for (
unsigned dof_i = 0; dof_i < ndof_in_parent_i; dof_i++)
5731 for (
unsigned dof_j = 0; dof_j < ndof_in_parent_j; dof_j++)
5733 if(new_block[dof_i][dof_j])
5735 delete tmp_blocks_pt(dof_i,dof_j);
5754 const MATRIX* block_matrix_pt)
const 5761 unsigned n_row = matrix_pt()->
nrow();
5764 unsigned n_col = matrix_pt()->ncol();
5767 for (
unsigned i = 0;
i < n_row;
i++)
5772 if (static_cast<int>(block_i) == this->internal_block_number(
i))
5776 for (
unsigned j = 0; j < n_col; j++)
5781 if (static_cast<int>(block_j) == this->internal_block_number(j))
5786 if ( matrix_pt()->
operator()(
i,j) !=
5788 ->
operator()(internal_index_in_block(
i),internal_index_in_block(j)) )
5800 std::ostringstream error_message;
5801 error_message <<
"The require elements have not been successfully copied" 5802 <<
" from the original matrix to the block matrices";
5804 OOMPH_CURRENT_FUNCTION,
5805 OOMPH_EXCEPTION_LOCATION);
int * column_index()
Access to C-style column index array.
unsigned long nnz() const
Return the number of nonzero entries (the local nnz)
void internal_return_block_vector(const unsigned &n, const DoubleVector &b, DoubleVector &v) const
Takes the n-th block ordered vector, b, and copies its entries to the appropriate entries in the natu...
unsigned long nrow() const
Return the number of rows of the matrix.
void get_block_vector(const unsigned &n, const DoubleVector &v, DoubleVector &b) const
Takes the naturally ordered vector, v and returns the n-th block vector, b. Here n is the block numbe...
void internal_return_block_ordered_preconditioner_vector(const DoubleVector &w, DoubleVector &v) const
Takes the block ordered vector, w, and reorders it in the natural order. Reordered vector is returned...
unsigned long ncol() const
Return the number of columns of the matrix.
void get_block_vectors(const Vector< unsigned > &block_vec_number, const DoubleVector &v, Vector< DoubleVector > &s) const
Takes the naturally ordered vector and rearranges it into a vector of sub vectors corresponding to th...
double * values_pt()
access function to the underlying values
void concatenate(const Vector< LinearAlgebraDistribution *> &in_distribution_pt, LinearAlgebraDistribution &out_distribution)
Takes a vector of LinearAlgebraDistribution objects and concatenates them such that the nrow_local of...
OomphCommunicator * communicator_pt() const
const access to the communicator pointer
void split_without_communication(const DoubleVector &in_vector, Vector< DoubleVector *> &out_vector_pt)
Split a DoubleVector into the out DoubleVectors. Data stays on its current processor, no data is sent between processors. This results in out vectors which are a permutation of the in vector.
unsigned first_row() const
access function for the first row on this processor. If not distributed then this is just zero...
bool distributed() const
access function to the distributed - indicates whether the distribution is serial or distributed ...
double * value()
Access to C-style value array.
void get_block_ordered_preconditioner_vector(const DoubleVector &v, DoubleVector &w)
Given the naturally ordered vector, v, return the vector rearranged in block order in w...
void concatenate_without_communication(const Vector< DoubleVector *> &in_vector_pt, DoubleVector &out_vector)
Concatenate DoubleVectors. Takes a Vector of DoubleVectors. If the out vector is built, we will not build a new distribution. Otherwise a new distribution will be built using LinearAlgebraDistribution::concatenate(...).
static bool Run_block_matrix_test
Static boolean to allow block_matrix_test(...) to be run. Defaults to false.
void get_blocks(DenseMatrix< bool > &required_blocks, DenseMatrix< MATRIX *> &block_matrix_pt) const
Get all the block matrices required by the block preconditioner. Takes a pointer to a matrix of bools...
Describes the distribution of a distributable linear algebra type object. Typically this is a contain...
void build(const DoubleVector &old_vector)
Just copies the argument DoubleVector.
void deep_copy(const CRDoubleMatrix *const in_matrix_pt, CRDoubleMatrix &out_matrix)
Create a deep copy of the matrix pointed to by in_matrix_pt.
void get_concatenated_block_vector(const Vector< unsigned > &block_vec_number, const DoubleVector &v, DoubleVector &b)
Takes the naturally ordered vector and extracts the blocks indicated by the block number (the values)...
virtual void block_setup()
Determine the size of the matrix blocks and setup the lookup schemes relating the global degrees of f...
unsigned nrow_local() const
access function for the num of local rows on this processor. If no MPI then Nrow is returned...
void internal_return_block_vectors(const Vector< unsigned > &block_vec_number, const Vector< DoubleVector > &s, DoubleVector &v) const
A helper function, takes the vector of block vectors, s, and copies its entries into the naturally or...
void internal_get_block_vectors(const Vector< unsigned > &block_vec_number, const DoubleVector &v, Vector< DoubleVector > &s) const
Takes the naturally ordered vector and rearranges it into a vector of sub vectors corresponding to th...
void initialise(const _Tp &__value)
Iterate over all values and set to the desired value.
void internal_get_block(const unsigned &i, const unsigned &j, MATRIX &output_block) const
Gets block (i,j) from the matrix pointed to by Matrix_pt and returns it in output_block. This is associated with the internal blocks. Please use the other get_block(...) function.
unsigned nrow() const
access function to the number of global rows.
void turn_into_subsidiary_block_preconditioner(BlockPreconditioner< MATRIX > *master_block_prec_pt, const Vector< unsigned > &doftype_in_master_preconditioner_coarse)
Function to turn this preconditioner into a subsidiary preconditioner that operates within a bigger "...
std::string string(const unsigned &i)
Return the i-th string or "" if the relevant string hasn't been defined.
static long Is_unclassified
Static "Magic number" used in place of the equation number to denote a value that hasn't been classif...
A vector in the mathematical sense, initially developed for linear algebra type applications. If MPI then this vector can be distributed - its distribution is described by the LinearAlgebraDistribution object at Distribution_pt. Data is stored in a C-style pointer vector (double*)
void clear()
clears the distribution
void internal_get_block_ordered_preconditioner_vector(const DoubleVector &v, DoubleVector &w) const
Given the naturally ordered vector, v, return the vector rearranged in block order in w...
void return_block_vectors(const Vector< unsigned > &block_vec_number, const Vector< DoubleVector > &s, DoubleVector &v) const
Takes the vector of block vectors, s, and copies its entries into the naturally ordered vector...
void return_block_ordered_preconditioner_vector(const DoubleVector &w, DoubleVector &v) const
Takes the block ordered vector, w, and reorders it in natural order. Reordered vector is returned in ...
void internal_get_block_vector(const unsigned &n, const DoubleVector &v, DoubleVector &b) const
A helper function, takes the naturally ordered vector, v, and extracts the n-th block vector...
int * row_start()
Access to C-style row_start array.
void return_block_vector(const unsigned &n, const DoubleVector &b, DoubleVector &v) const
Takes the n-th block ordered vector, b, and copies its entries to the appropriate entries in the natu...
A class for compressed row matrices. This is a distributable object.
LinearAlgebraDistribution * distribution_pt() const
access to the LinearAlgebraDistribution
void return_concatenated_block_vector(const Vector< unsigned > &block_vec_number, const DoubleVector &b, DoubleVector &v) const
Takes concatenated block ordered vector, b, and copies its entries to the appropriate entries in the ...
void build(const LinearAlgebraDistribution *distribution_pt, const unsigned &ncol, const Vector< double > &value, const Vector< int > &column_index, const Vector< int > &row_start)
build method: vector of values, vector of column indices, vector of row starts and number of rows and...
void build_without_copy(const unsigned &ncol, const unsigned &nnz, double *value, int *column_index, int *row_start)
keeps the existing distribution and just changes the matrix that is stored, without copying the matrix data ...