33 #include <oomph-lib-config.h> 55 (Vector<CRDoubleMatrix*> matrix_pt,
56 Vector<Preconditioner*> prec_pt,
57 const OomphCommunicator* comm_pt)
63 Nprec = prec_pt.size();
69 std::ostringstream error_message;
70 error_message <<
"The PreconditionerArray requires at least 2 " 72 throw OomphLibError(error_message.str(),
73 OOMPH_CURRENT_FUNCTION,
74 OOMPH_EXCEPTION_LOCATION);
77 if (matrix_pt.size() !=
Nprec)
79 std::ostringstream error_message;
80 error_message <<
"The same number of preconditioners and matrices must " 81 <<
"be passed to the setup_preconditioners(...).";
82 throw OomphLibError(error_message.str(),
83 OOMPH_CURRENT_FUNCTION,
84 OOMPH_EXCEPTION_LOCATION);
99 if (matrix_pt[
i] == 0)
101 std::ostringstream error_message;
102 error_message <<
"matrix_pt[" <<
i <<
"] = NULL.";
103 throw OomphLibError(error_message.str(),
104 OOMPH_CURRENT_FUNCTION,
105 OOMPH_EXCEPTION_LOCATION);
109 if (!matrix_pt[i]->built())
111 std::ostringstream error_message;
112 error_message <<
"Matrix " << i <<
" has not been built.";
113 throw OomphLibError(error_message.str(),
114 OOMPH_CURRENT_FUNCTION,
115 OOMPH_EXCEPTION_LOCATION);
125 new OomphCommunicator
126 (matrix_pt[i]->distribution_pt()->communicator_pt());
133 *matrix_pt[i]->distribution_pt()->communicator_pt())
135 std::ostringstream error_message;
136 error_message <<
"All matrices must have the same communicator.";
137 throw OomphLibError(error_message.str(),
138 OOMPH_CURRENT_FUNCTION,
139 OOMPH_EXCEPTION_LOCATION);
146 (matrix_pt[i]->distribution_pt());
160 for (
unsigned p=0;p<
Nprec;p++)
167 for (
unsigned p=0; p<Nprec-1; p++)
175 for (
unsigned p=0;p<
Nprec;p++)
179 std::ostringstream error_message;
180 error_message <<
"We only have " << nproc <<
" processor[s]!\n" 181 <<
"This is not enough to perform the " << Nprec
182 <<
" block solves in parallel! Sorry! \n" 183 <<
"Please run this with more processors or disable the\n" 184 <<
"request for two-level paralellism.\n";
185 throw OomphLibError(error_message.str(),
186 OOMPH_CURRENT_FUNCTION,
187 OOMPH_EXCEPTION_LOCATION);
195 while (!(First_proc_for_prec[Color] <= my_rank &&
205 CRDoubleMatrix* local_matrix_pt = 0;
214 Vector<MPI_Request> req;
220 Vector< Vector<unsigned> > target_first_row(Nprec);
221 Vector< Vector<unsigned> > target_nrow_local(Nprec);
225 Vector< Vector<unsigned> > nnz_send(Nprec);
226 Vector< Vector<unsigned> > nnz_recv(Nprec);
242 for (
unsigned i = 0; i <
Nprec; i++)
247 if (matrix_pt[i]->distributed())
254 unsigned nrow = matrix_pt[
i]->nrow();
257 target_first_row[
i].resize(nproc);
258 target_nrow_local[
i].resize(nproc);
260 for (
unsigned p = 0; p < nproc_local; p++)
262 int pp = First_proc_for_prec[
i] + p;
263 target_first_row[
i][pp] = unsigned(
double(p*nrow)/
264 double(nproc_local));
266 for (
unsigned p = 0; p < nproc_local-1; p++)
268 int pp = First_proc_for_prec[
i] + p;
269 target_nrow_local[
i][pp] = target_first_row[
i][pp+1]
270 - target_first_row[
i][pp];
272 unsigned last_local_proc = First_proc_for_prec[
i] + nproc_local - 1;
273 target_nrow_local[
i][last_local_proc] = nrow -
274 target_first_row[
i][last_local_proc];
277 Vector<unsigned> current_first_row(nproc);
278 Vector<unsigned> current_nrow_local(nproc);
279 for (
unsigned p = 0; p < nproc; p++)
281 current_first_row[p] = matrix_pt[
i]->first_row(p);
282 current_nrow_local[p] = matrix_pt[
i]->nrow_local(p);
293 for (
unsigned p = 0; p < nproc; p++)
296 if ((target_first_row[i][p] < (current_first_row[my_rank] +
297 current_nrow_local[my_rank])) &&
298 (current_first_row[my_rank] < (target_first_row[i][p] +
299 target_nrow_local[i][p])))
302 std::max(current_first_row[my_rank],
303 target_first_row[i][p]);
305 std::min((current_first_row[my_rank] +
306 current_nrow_local[my_rank]),
307 (target_first_row[i][p] +
312 if ((target_first_row[i][my_rank] < (current_first_row[p] +
313 current_nrow_local[p]))
314 && (current_first_row[p] < (target_first_row[i][my_rank] +
315 target_nrow_local[i][my_rank])))
318 std::max(current_first_row[p],
319 target_first_row[i][my_rank]);
321 std::min((current_first_row[p] +
322 current_nrow_local[p]),
323 (target_first_row[i][my_rank] +
324 target_nrow_local[i][my_rank]))-
330 nnz_send[
i].resize(nproc);
334 for (
unsigned p = 0; p < nproc; p++)
338 int* row_start = matrix_pt[
i]->row_start();
346 for (
unsigned p = 0; p < nproc; p++)
361 MPI_Isend(&nnz_send[i][p],1,MPI_UNSIGNED,p,tag,
369 nnz_recv[
i].resize(nproc);
372 for (
unsigned pp = 0; pp < nproc; pp++)
376 unsigned p = (nproc + my_rank - pp)%nproc;
388 MPI_Recv(&nnz_temp,1,MPI_UNSIGNED,p,tag,
390 nnz_recv[
i][p] = nnz_temp;
397 nnz_recv[
i][p] = nnz_send[
i][p];
402 double* values_send = matrix_pt[
i]->value();
403 int* row_start_send = matrix_pt[
i]->row_start();
404 int* column_index_send = matrix_pt[
i]->column_index();
407 for (
unsigned p = 0; p < nproc; p++)
415 if (nnz_send[i][p] != 0)
423 int offset_nnz = row_start_send[offset_n];
429 MPI_Isend(values_send + offset_nnz,
int(nnz_send[i][p]),
438 MPI_Isend(column_index_send + offset_nnz,
int(nnz_send[i][p]),
447 MPI_Isend(row_start_send + offset_n,
459 for (
unsigned i = 0; i <
Nprec; i++)
464 if (!matrix_pt[i]->distributed())
466 oomph_info <<
"matrix not distributed" << std::endl;
472 LinearAlgebraDistribution* temp_dist_pt =
474 matrix_pt[i]->nrow(),
478 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
482 double* values_pt = matrix_pt[
i]->value();
483 int* column_index_pt = matrix_pt[
i]->column_index();
484 int* row_start_pt = matrix_pt[
i]->row_start();
487 local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
508 LinearAlgebraDistribution* temp_dist_pt =
509 new LinearAlgebraDistribution
511 target_nrow_local[i][my_rank]);
514 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
520 unsigned nnz_total = 0;
521 for (
unsigned p = 0; p < nproc; p++)
523 nnz_total += nnz_recv[
i][p];
527 Vector<unsigned> nnz_start_proc;
528 Vector<unsigned> nnz_start_index;
529 unsigned row_ptr = target_first_row[
i][my_rank];
531 unsigned nnz_ptr = 0;
532 for (p = 0; p < int(nproc); p++)
537 nnz_ptr != nnz_total)
539 nnz_start_proc.push_back(p);
540 nnz_start_index.push_back(nnz_ptr);
541 nnz_ptr += nnz_recv[
i][p];
548 double* values_recv =
new double[nnz_total];
549 int* column_index_recv =
new int[nnz_total];
550 int* row_start_recv =
new int[target_nrow_local[
i][my_rank]+1];
553 for (
unsigned pp = 0; pp < nproc; pp++)
557 unsigned p = (nproc + my_rank - pp)%nproc;
564 if (nnz_recv[i][p] != 0)
573 while (nnz_start_proc[k] != p)
577 int offset_nnz = nnz_start_index[k];
582 MPI_Recv(values_recv + offset_nnz,
int(nnz_recv[i][p]),
589 MPI_Recv(column_index_recv + offset_nnz,
int(nnz_recv[i][p]),
596 MPI_Recv(row_start_recv + offset_n,
604 if (nnz_recv[i][p] != 0)
608 double* values_send = matrix_pt[
i]->value();
609 int* row_start_send = matrix_pt[
i]->row_start();
610 int* column_index_send = matrix_pt[
i]->column_index();
613 unsigned offset_n_send =
616 unsigned offset_nnz_send = row_start_send[offset_n_send];
619 unsigned offset_n_recv =
624 while (nnz_start_proc[k] != p)
628 unsigned offset_nnz_recv = nnz_start_index[k];
633 unsigned n_nnz = nnz_send[
i][my_rank];
634 for (
unsigned j = 0; j < n_nnz; j++)
636 values_recv[offset_nnz_recv + j] =
637 values_send[offset_nnz_send + j];
638 column_index_recv[offset_nnz_recv + j] =
639 column_index_send[offset_nnz_send + j];
644 for (
unsigned j = 0; j < n_n; j++)
646 row_start_recv[offset_n_recv + j] =
647 row_start_send[offset_n_send + j];
658 unsigned nproc_contrib = nnz_start_index.size();
659 for (
unsigned j = 0; j < nproc_contrib; j++)
662 target_first_row[
i][my_rank];
664 unsigned nnz_inc = nnz_start_index[j]-row_start_recv[first];
665 for (
unsigned k = first; k < last; k++)
667 row_start_recv[k]+=nnz_inc;
670 row_start_recv[target_nrow_local[
i][my_rank]] = int(nnz_total);
673 local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
685 Vector<MPI_Status> stat(c);
686 MPI_Waitall(c,&req[0],&stat[0]);
702 unsigned* nnz_recv_temp =
new unsigned[nproc*
Nprec];
703 for (
unsigned j = 0; j < nproc*
Nprec; j++)
705 nnz_recv_temp[j] = 0;
710 for (
unsigned i = 0; i <
Nprec; i++)
715 if (!matrix_pt[i]->distributed())
723 LinearAlgebraDistribution* temp_dist_pt =
725 matrix_pt[i]->nrow(),
729 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
733 double* values_pt = matrix_pt[
i]->value();
734 int* column_index_pt = matrix_pt[
i]->column_index();
735 int* row_start_pt = matrix_pt[
i]->row_start();
738 local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
755 unsigned nrow = matrix_pt[
i]->nrow();
758 target_first_row[
i].resize(nproc);
759 target_nrow_local[
i].resize(nproc);
761 for (
unsigned p = 0; p < nproc_local; p++)
763 int pp = First_proc_for_prec[
i] + p;
764 target_first_row[
i][pp] = unsigned(
double(p*nrow)/
765 double(nproc_local));
767 for (
unsigned p = 0; p < nproc_local-1; p++)
769 int pp = First_proc_for_prec[
i] + p;
770 target_nrow_local[
i][pp] = target_first_row[
i][pp+1]
771 - target_first_row[
i][pp];
773 unsigned last_local_proc = First_proc_for_prec[
i] + nproc_local - 1;
774 target_nrow_local[
i][last_local_proc] = nrow -
775 target_first_row[
i][last_local_proc];
778 Vector<unsigned> current_first_row(nproc);
779 Vector<unsigned> current_nrow_local(nproc);
780 for (
unsigned p = 0; p < nproc; p++)
782 current_first_row[p] = matrix_pt[
i]->first_row(p);
783 current_nrow_local[p] = matrix_pt[
i]->nrow_local(p);
794 for (
unsigned p = 0; p < nproc; p++)
797 if ((target_first_row[i][p] < (current_first_row[my_rank] +
798 current_nrow_local[my_rank])) &&
799 (current_first_row[my_rank] < (target_first_row[i][p] +
800 target_nrow_local[i][p])))
803 std::max(current_first_row[my_rank],
804 target_first_row[i][p]);
806 std::min((current_first_row[my_rank] +
807 current_nrow_local[my_rank]),
808 (target_first_row[i][p] +
813 if ((target_first_row[i][my_rank] < (current_first_row[p] +
814 current_nrow_local[p]))
815 && (current_first_row[p] < (target_first_row[i][my_rank] +
816 target_nrow_local[i][my_rank])))
819 std::max(current_first_row[p],
820 target_first_row[i][my_rank]);
822 std::min((current_first_row[p] +
823 current_nrow_local[p]),
824 (target_first_row[i][my_rank] +
825 target_nrow_local[i][my_rank]))-
831 nnz_send[
i].resize(nproc);
835 for (
unsigned p = 0; p < nproc; p++)
839 int* row_start = matrix_pt[
i]->row_start();
847 nnz_recv[
i].resize(nproc);
850 for (
unsigned p = 0; p < nproc; p++)
867 MPI_Isend(&nnz_send[i][p],1,MPI_UNSIGNED,p,tag,
878 MPI_Irecv(nnz_recv_temp + (i*nproc) + p,1,MPI_UNSIGNED,p,tag,
888 nnz_recv_temp[(i*nproc)+p] = nnz_send[i][p];
896 Vector<MPI_Status> stat(c);
897 MPI_Waitall(c,&req[0],&stat[0]);
902 for (
unsigned i = 0; i <
Nprec; i++)
904 for (
unsigned p = 0; p < nproc; p++)
906 nnz_recv[
i][p] = nnz_recv_temp[(i*nproc)+p];
909 delete nnz_recv_temp;
914 unsigned nnz_total = 0;
915 for (
unsigned p = 0; p < nproc; p++)
917 nnz_total += nnz_recv[
Color][p];
921 Vector<unsigned> nnz_start_proc;
922 Vector<unsigned> nnz_start_index;
923 unsigned row_ptr = target_first_row[
Color][my_rank];
925 unsigned nnz_ptr = 0;
926 for (p = 0; p < int(nproc); p++)
930 nnz_ptr != nnz_total)
932 nnz_start_proc.push_back(p);
933 nnz_start_index.push_back(nnz_ptr);
934 nnz_ptr += nnz_recv[
Color][p];
941 Vector<MPI_Datatype> datatypes;
944 double* values_recv =
new double[nnz_total];
945 int* column_index_recv =
new int[nnz_total];
946 int* row_start_recv =
new int[target_nrow_local[
Color][my_rank]+1];
952 Vector<MPI_Request> send_req;
956 for (
unsigned i = 0; i <
Nprec; i++)
960 double* values_send = matrix_pt[
i]->value();
961 int* row_start_send = matrix_pt[
i]->row_start();
962 int* column_index_send = matrix_pt[
i]->column_index();
965 for (
unsigned p = 0; p < nproc; p++)
973 if (nnz_send[i][p] != 0)
982 MPI_Datatype datatype_values;
983 MPI_Type_contiguous(
int(nnz_send[i][p]),MPI_DOUBLE,
985 MPI_Type_commit(&datatype_values);
986 datatypes.push_back(datatype_values);
989 MPI_Datatype datatype_column_index;
990 MPI_Type_contiguous(
int(nnz_send[i][p]),MPI_INT,
991 &datatype_column_index);
992 MPI_Type_commit(&datatype_column_index);
993 datatypes.push_back(datatype_column_index);
996 MPI_Datatype datatype_row_start;
998 &datatype_row_start);
999 MPI_Type_commit(&datatype_row_start);
1000 datatypes.push_back(datatype_row_start);
1003 MPI_Datatype typelist[3];
1004 typelist[0] = datatype_values;
1005 typelist[1] = datatype_column_index;
1006 typelist[2] = datatype_row_start;
1013 int offset_nnz = row_start_send[offset_n];
1016 MPI_Aint displacements[3];
1017 MPI_Get_address(values_send + offset_nnz,&displacements[0]);
1018 MPI_Get_address(column_index_send + offset_nnz,&displacements[1]);
1019 MPI_Get_address(row_start_send + offset_n,&displacements[2]);
1020 for (
int j = 2; j >= 0; j--)
1022 displacements[j] -= displacements[0];
1026 int block_length[3];
1027 block_length[0] = block_length[1] = block_length[2] = 1;
1030 MPI_Datatype send_type;
1031 MPI_Type_create_struct(3,block_length,displacements,typelist,
1033 MPI_Type_commit(&send_type);
1034 datatypes.push_back(send_type);
1039 send_req.push_back(tr1);
1040 MPI_Isend(values_send + offset_nnz,1,send_type,
1052 unsigned c_recv = 0;
1053 Vector<MPI_Request> recv_req;
1056 for (
unsigned p = 0; p < nproc; p++)
1064 if (nnz_recv[Color][p] != 0)
1073 MPI_Datatype datatype_values;
1074 MPI_Type_contiguous(
int(nnz_recv[Color][p]),MPI_DOUBLE,
1076 MPI_Type_commit(&datatype_values);
1077 datatypes.push_back(datatype_values);
1080 MPI_Datatype datatype_column_index;
1081 MPI_Type_contiguous(
int(nnz_recv[Color][p]),MPI_INT,
1082 &datatype_column_index);
1083 MPI_Type_commit(&datatype_column_index);
1084 datatypes.push_back(datatype_column_index);
1087 MPI_Datatype datatype_row_start;
1089 &datatype_row_start);
1090 MPI_Type_commit(&datatype_row_start);
1091 datatypes.push_back(datatype_row_start);
1094 MPI_Datatype typelist[3];
1095 typelist[0] = datatype_values;
1096 typelist[1] = datatype_column_index;
1097 typelist[2] = datatype_row_start;
1105 while (nnz_start_proc[k] != p)
1109 int offset_nnz = nnz_start_index[k];
1112 MPI_Aint displacements[3];
1113 MPI_Get_address(values_recv + offset_nnz,&displacements[0]);
1114 MPI_Get_address(column_index_recv + offset_nnz,&displacements[1]);
1115 MPI_Get_address(row_start_recv + offset_n,&displacements[2]);
1116 for (
int j = 2; j >= 0; j--)
1118 displacements[j] -= displacements[0];
1122 int block_length[3];
1123 block_length[0] = block_length[1] = block_length[2] = 1;
1126 MPI_Datatype recv_type;
1127 MPI_Type_create_struct(3,block_length,displacements,typelist,
1129 MPI_Type_commit(&recv_type);
1130 datatypes.push_back(recv_type);
1135 recv_req.push_back(tr1);
1136 MPI_Irecv(values_recv + offset_nnz,1,recv_type,
1145 if (nnz_recv[Color][my_rank] != 0)
1149 double* values_send = matrix_pt[
Color]->value();
1150 int* row_start_send = matrix_pt[
Color]->row_start();
1151 int* column_index_send = matrix_pt[
Color]->column_index();
1154 unsigned offset_n_send =
1158 unsigned offset_nnz_send = row_start_send[offset_n_send];
1161 unsigned offset_n_recv =
1166 while (nnz_start_proc[k] != my_rank)
1170 unsigned offset_nnz_recv = nnz_start_index[k];
1175 unsigned n_nnz = nnz_send[
Color][my_rank];
1176 for (
unsigned j = 0; j < n_nnz; j++)
1178 values_recv[offset_nnz_recv + j] =
1179 values_send[offset_nnz_send + j];
1180 column_index_recv[offset_nnz_recv + j] =
1181 column_index_send[offset_nnz_send + j];
1186 for (
unsigned j = 0; j < n_n; j++)
1188 row_start_recv[offset_n_recv + j] =
1189 row_start_send[offset_n_send + j];
1194 LinearAlgebraDistribution* temp_dist_pt =
1195 new LinearAlgebraDistribution
1197 target_nrow_local[Color][my_rank]);
1200 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
1201 delete temp_dist_pt;
1208 Vector<MPI_Status> recv_stat(c_recv);
1209 MPI_Waitall(c_recv,&recv_req[0],&recv_stat[0]);
1217 unsigned nproc_contrib = nnz_start_index.size();
1218 for (
unsigned j = 0; j < nproc_contrib; j++)
1221 target_first_row[
Color][my_rank];
1223 unsigned nnz_inc = nnz_start_index[j]-row_start_recv[first];
1224 for (
unsigned k = first; k < last; k++)
1226 row_start_recv[k]+=nnz_inc;
1229 row_start_recv[target_nrow_local[
Color][my_rank]] = int(nnz_total);
1232 local_matrix_pt->build_without_copy(matrix_pt[Color]->ncol(),
1241 Vector<MPI_Status> send_stat(c_recv);
1242 MPI_Waitall(c_send,&send_req[0],&send_stat[0]);
1248 unsigned ndatatypes = datatypes.size();
1249 for (
unsigned i = 0; i < ndatatypes; i++)
1251 MPI_Type_free(&datatypes[i]);
1268 unsigned* nnz_recv_temp =
new unsigned[nproc*
Nprec];
1269 for (
unsigned j = 0; j < nproc*
Nprec; j++)
1271 nnz_recv_temp[j] = 0;
1276 for (
unsigned i = 0; i <
Nprec; i++)
1281 if (!matrix_pt[i]->distributed())
1289 LinearAlgebraDistribution* temp_dist_pt =
1291 matrix_pt[i]->nrow(),
1295 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
1296 delete temp_dist_pt;
1299 double* values_pt = matrix_pt[
i]->value();
1300 int* column_index_pt = matrix_pt[
i]->column_index();
1301 int* row_start_pt = matrix_pt[
i]->row_start();
1304 local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
1305 matrix_pt[i]->nnz(),
1321 unsigned nrow = matrix_pt[
i]->nrow();
1324 target_first_row[
i].resize(nproc);
1325 target_nrow_local[
i].resize(nproc);
1327 for (
unsigned p = 0; p < nproc_local; p++)
1329 int pp = First_proc_for_prec[
i] + p;
1330 target_first_row[
i][pp] = unsigned(
double(p*nrow)/
1331 double(nproc_local));
1333 for (
unsigned p = 0; p < nproc_local-1; p++)
1335 int pp = First_proc_for_prec[
i] + p;
1336 target_nrow_local[
i][pp] = target_first_row[
i][pp+1]
1337 - target_first_row[
i][pp];
1339 unsigned last_local_proc = First_proc_for_prec[
i] + nproc_local - 1;
1340 target_nrow_local[
i][last_local_proc] = nrow -
1341 target_first_row[
i][last_local_proc];
1344 Vector<unsigned> current_first_row(nproc);
1345 Vector<unsigned> current_nrow_local(nproc);
1346 for (
unsigned p = 0; p < nproc; p++)
1348 current_first_row[p] = matrix_pt[
i]->first_row(p);
1349 current_nrow_local[p] = matrix_pt[
i]->nrow_local(p);
1360 for (
unsigned p = 0; p < nproc; p++)
1363 if ((target_first_row[i][p] < (current_first_row[my_rank] +
1364 current_nrow_local[my_rank])) &&
1365 (current_first_row[my_rank] < (target_first_row[i][p] +
1366 target_nrow_local[i][p])))
1369 std::max(current_first_row[my_rank],
1370 target_first_row[i][p]);
1372 std::min((current_first_row[my_rank] +
1373 current_nrow_local[my_rank]),
1374 (target_first_row[i][p] +
1379 if ((target_first_row[i][my_rank] < (current_first_row[p] +
1380 current_nrow_local[p]))
1381 && (current_first_row[p] < (target_first_row[i][my_rank] +
1382 target_nrow_local[i][my_rank])))
1385 std::max(current_first_row[p],
1386 target_first_row[i][my_rank]);
1388 std::min((current_first_row[p] +
1389 current_nrow_local[p]),
1390 (target_first_row[i][my_rank] +
1391 target_nrow_local[i][my_rank]))-
1397 nnz_send[
i].resize(nproc);
1401 for (
unsigned p = 0; p < nproc; p++)
1405 int* row_start = matrix_pt[
i]->row_start();
1413 nnz_recv[
i].resize(nproc);
1416 for (
unsigned p = 0; p < nproc; p++)
1433 MPI_Isend(&nnz_send[i][p],1,MPI_UNSIGNED,p,tag,
1444 MPI_Irecv(nnz_recv_temp + (i*nproc) + p,1,MPI_UNSIGNED,p,tag,
1454 nnz_recv_temp[(i*nproc)+p] = nnz_send[i][p];
1462 Vector<MPI_Status> stat(c);
1463 MPI_Waitall(c,&req[0],&stat[0]);
1468 for (
unsigned i = 0; i <
Nprec; i++)
1470 for (
unsigned p = 0; p < nproc; p++)
1472 nnz_recv[
i][p] = nnz_recv_temp[(i*nproc)+p];
1475 delete nnz_recv_temp;
1480 unsigned nnz_total = 0;
1481 for (
unsigned p = 0; p < nproc; p++)
1483 nnz_total += nnz_recv[
Color][p];
1487 Vector<unsigned> nnz_start_proc;
1488 Vector<unsigned> nnz_start_index;
1489 unsigned row_ptr = target_first_row[
Color][my_rank];
1491 unsigned nnz_ptr = 0;
1492 for (p = 0; p < int(nproc); p++)
1496 nnz_ptr != nnz_total)
1498 nnz_start_proc.push_back(p);
1499 nnz_start_index.push_back(nnz_ptr);
1500 nnz_ptr += nnz_recv[
Color][p];
1507 Vector<MPI_Datatype> datatypes;
1510 double* values_recv =
new double[nnz_total];
1511 int* column_index_recv =
new int[nnz_total];
1512 int* row_start_recv =
new int[target_nrow_local[
Color][my_rank]+1];
1517 unsigned c_recv = 0;
1518 Vector<MPI_Request> recv_req;
1521 for (
unsigned p = 0; p < nproc; p++)
1529 if (nnz_recv[Color][p] != 0)
1538 MPI_Datatype datatype_values;
1539 MPI_Type_contiguous(
int(nnz_recv[Color][p]),MPI_DOUBLE,
1541 MPI_Type_commit(&datatype_values);
1542 datatypes.push_back(datatype_values);
1545 MPI_Datatype datatype_column_index;
1546 MPI_Type_contiguous(
int(nnz_recv[Color][p]),MPI_INT,
1547 &datatype_column_index);
1548 MPI_Type_commit(&datatype_column_index);
1549 datatypes.push_back(datatype_column_index);
1552 MPI_Datatype datatype_row_start;
1554 &datatype_row_start);
1555 MPI_Type_commit(&datatype_row_start);
1556 datatypes.push_back(datatype_row_start);
1559 MPI_Datatype typelist[3];
1560 typelist[0] = datatype_values;
1561 typelist[1] = datatype_column_index;
1562 typelist[2] = datatype_row_start;
1570 while (nnz_start_proc[k] != p)
1574 int offset_nnz = nnz_start_index[k];
1577 MPI_Aint displacements[3];
1578 MPI_Get_address(values_recv + offset_nnz,&displacements[0]);
1579 MPI_Get_address(column_index_recv + offset_nnz,&displacements[1]);
1580 MPI_Get_address(row_start_recv + offset_n,&displacements[2]);
1581 for (
int j = 2; j >= 0; j--)
1583 displacements[j] -= displacements[0];
1587 int block_length[3];
1588 block_length[0] = block_length[1] = block_length[2] = 1;
1591 MPI_Datatype recv_type;
1592 MPI_Type_create_struct(3,block_length,displacements,typelist,
1594 MPI_Type_commit(&recv_type);
1595 datatypes.push_back(recv_type);
1600 recv_req.push_back(tr1);
1601 MPI_Irecv(values_recv + offset_nnz,1,recv_type,
1612 unsigned c_send = 0;
1613 Vector<MPI_Request> send_req;
1617 for (
unsigned i = 0; i <
Nprec; i++)
1621 double* values_send = matrix_pt[
i]->value();
1622 int* row_start_send = matrix_pt[
i]->row_start();
1623 int* column_index_send = matrix_pt[
i]->column_index();
1626 for (
unsigned p = 0; p < nproc; p++)
1634 if (nnz_send[i][p] != 0)
1643 MPI_Datatype datatype_values;
1644 MPI_Type_contiguous(
int(nnz_send[i][p]),MPI_DOUBLE,
1646 MPI_Type_commit(&datatype_values);
1647 datatypes.push_back(datatype_values);
1650 MPI_Datatype datatype_column_index;
1651 MPI_Type_contiguous(
int(nnz_send[i][p]),MPI_INT,
1652 &datatype_column_index);
1653 MPI_Type_commit(&datatype_column_index);
1654 datatypes.push_back(datatype_column_index);
1657 MPI_Datatype datatype_row_start;
1659 &datatype_row_start);
1660 MPI_Type_commit(&datatype_row_start);
1661 datatypes.push_back(datatype_row_start);
1664 MPI_Datatype typelist[3];
1665 typelist[0] = datatype_values;
1666 typelist[1] = datatype_column_index;
1667 typelist[2] = datatype_row_start;
1674 int offset_nnz = row_start_send[offset_n];
1677 MPI_Aint displacements[3];
1678 MPI_Get_address(values_send + offset_nnz,&displacements[0]);
1679 MPI_Get_address(column_index_send + offset_nnz,&displacements[1]);
1680 MPI_Get_address(row_start_send + offset_n,&displacements[2]);
1681 for (
int j = 2; j >= 0; j--)
1683 displacements[j] -= displacements[0];
1687 int block_length[3];
1688 block_length[0] = block_length[1] = block_length[2] = 1;
1691 MPI_Datatype send_type;
1692 MPI_Type_create_struct(3,block_length,displacements,typelist,
1694 MPI_Type_commit(&send_type);
1695 datatypes.push_back(send_type);
1700 send_req.push_back(tr1);
1701 MPI_Isend(values_send + offset_nnz,1,send_type,
1711 if (nnz_recv[Color][my_rank] != 0)
1715 double* values_send = matrix_pt[
Color]->value();
1716 int* row_start_send = matrix_pt[
Color]->row_start();
1717 int* column_index_send = matrix_pt[
Color]->column_index();
1720 unsigned offset_n_send =
1724 unsigned offset_nnz_send = row_start_send[offset_n_send];
1727 unsigned offset_n_recv =
1732 while (nnz_start_proc[k] != my_rank)
1736 unsigned offset_nnz_recv = nnz_start_index[k];
1741 unsigned n_nnz = nnz_send[
Color][my_rank];
1742 for (
unsigned j = 0; j < n_nnz; j++)
1744 values_recv[offset_nnz_recv + j] =
1745 values_send[offset_nnz_send + j];
1746 column_index_recv[offset_nnz_recv + j] =
1747 column_index_send[offset_nnz_send + j];
1752 for (
unsigned j = 0; j < n_n; j++)
1754 row_start_recv[offset_n_recv + j] =
1755 row_start_send[offset_n_send + j];
1760 LinearAlgebraDistribution* temp_dist_pt =
1761 new LinearAlgebraDistribution
1763 target_nrow_local[Color][my_rank]);
1766 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
1767 delete temp_dist_pt;
1774 Vector<MPI_Status> recv_stat(c_recv);
1775 MPI_Waitall(c_recv,&recv_req[0],&recv_stat[0]);
1783 unsigned nproc_contrib = nnz_start_index.size();
1784 for (
unsigned j = 0; j < nproc_contrib; j++)
1787 target_first_row[
Color][my_rank];
1789 unsigned nnz_inc = nnz_start_index[j]-row_start_recv[first];
1790 for (
unsigned k = first; k < last; k++)
1792 row_start_recv[k]+=nnz_inc;
1795 row_start_recv[target_nrow_local[
Color][my_rank]] = int(nnz_total);
1798 local_matrix_pt->build_without_copy(matrix_pt[Color]->ncol(),
1807 Vector<MPI_Status> send_stat(c_send);
1808 MPI_Waitall(c_send,&send_req[0],&send_stat[0]);
1814 unsigned ndatatypes = datatypes.size();
1815 for (
unsigned i = 0; i < ndatatypes; i++)
1817 MPI_Type_free(&datatypes[i]);
1835 unsigned* nnz_recv_temp =
new unsigned[nproc*
Nprec];
1836 for (
unsigned j = 0; j < nproc*
Nprec; j++)
1838 nnz_recv_temp[j] = 0;
1843 for (
unsigned i = 0; i <
Nprec; i++)
1848 if (!matrix_pt[i]->distributed())
1856 LinearAlgebraDistribution* temp_dist_pt =
1858 matrix_pt[i]->nrow(),
1862 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
1863 delete temp_dist_pt;
1866 double* values_pt = matrix_pt[
i]->value();
1867 int* column_index_pt = matrix_pt[
i]->column_index();
1868 int* row_start_pt = matrix_pt[
i]->row_start();
1871 local_matrix_pt->build_without_copy(matrix_pt[i]->ncol(),
1872 matrix_pt[i]->nnz(),
1888 unsigned nrow = matrix_pt[
i]->nrow();
1891 target_first_row[
i].resize(nproc);
1892 target_nrow_local[
i].resize(nproc);
1894 for (
unsigned p = 0; p < nproc_local; p++)
1896 int pp = First_proc_for_prec[
i] + p;
1897 target_first_row[
i][pp] = unsigned(
double(p*nrow)/
1898 double(nproc_local));
1900 for (
unsigned p = 0; p < nproc_local-1; p++)
1902 int pp = First_proc_for_prec[
i] + p;
1903 target_nrow_local[
i][pp] = target_first_row[
i][pp+1]
1904 - target_first_row[
i][pp];
1906 unsigned last_local_proc = First_proc_for_prec[
i] + nproc_local - 1;
1907 target_nrow_local[
i][last_local_proc] = nrow -
1908 target_first_row[
i][last_local_proc];
1911 Vector<unsigned> current_first_row(nproc);
1912 Vector<unsigned> current_nrow_local(nproc);
1913 for (
unsigned p = 0; p < nproc; p++)
1915 current_first_row[p] = matrix_pt[
i]->first_row(p);
1916 current_nrow_local[p] = matrix_pt[
i]->nrow_local(p);
1927 for (
unsigned p = 0; p < nproc; p++)
1930 if ((target_first_row[i][p] < (current_first_row[my_rank] +
1931 current_nrow_local[my_rank])) &&
1932 (current_first_row[my_rank] < (target_first_row[i][p] +
1933 target_nrow_local[i][p])))
1936 std::max(current_first_row[my_rank],
1937 target_first_row[i][p]);
1939 std::min((current_first_row[my_rank] +
1940 current_nrow_local[my_rank]),
1941 (target_first_row[i][p] +
1946 if ((target_first_row[i][my_rank] < (current_first_row[p] +
1947 current_nrow_local[p]))
1948 && (current_first_row[p] < (target_first_row[i][my_rank] +
1949 target_nrow_local[i][my_rank])))
1952 std::max(current_first_row[p],
1953 target_first_row[i][my_rank]);
1955 std::min((current_first_row[p] +
1956 current_nrow_local[p]),
1957 (target_first_row[i][my_rank] +
1958 target_nrow_local[i][my_rank]))-
1964 nnz_send[
i].resize(nproc);
1968 for (
unsigned p = 0; p < nproc; p++)
1972 int* row_start = matrix_pt[
i]->row_start();
1980 nnz_recv[
i].resize(nproc);
1983 for (
unsigned p = 0; p < nproc; p++)
2000 MPI_Isend(&nnz_send[i][p],1,MPI_UNSIGNED,p,tag,
2010 nnz_recv_temp[(i*nproc)+p] = nnz_send[i][p];
2017 for (
unsigned i = 0; i <
Nprec; i++)
2020 nnz_recv[
i].resize(nproc);
2023 for (
unsigned pp = 0; pp < nproc; pp++)
2027 unsigned p = (nproc + my_rank - pp)%nproc;
2039 MPI_Recv(&nnz_temp,1,MPI_UNSIGNED,p,tag,
2041 nnz_recv[
i][p] = nnz_temp;
2048 nnz_recv[
i][p] = nnz_send[
i][p];
2056 unsigned nnz_total = 0;
2057 for (
unsigned p = 0; p < nproc; p++)
2059 nnz_total += nnz_recv[
Color][p];
2063 Vector<unsigned> nnz_start_proc;
2064 Vector<unsigned> nnz_start_index;
2065 unsigned row_ptr = target_first_row[
Color][my_rank];
2067 unsigned nnz_ptr = 0;
2068 for (p = 0; p < int(nproc); p++)
2072 nnz_ptr != nnz_total)
2074 nnz_start_proc.push_back(p);
2075 nnz_start_index.push_back(nnz_ptr);
2076 nnz_ptr += nnz_recv[
Color][p];
2083 Vector<MPI_Datatype> datatypes;
2086 double* values_recv =
new double[nnz_total];
2087 int* column_index_recv =
new int[nnz_total];
2088 int* row_start_recv =
new int[target_nrow_local[
Color][my_rank]+1];
2093 unsigned c_recv = 0;
2094 Vector<MPI_Request> recv_req;
2097 for (
unsigned p = 0; p < nproc; p++)
2105 if (nnz_recv[Color][p] != 0)
2114 MPI_Datatype datatype_values;
2115 MPI_Type_contiguous(
int(nnz_recv[Color][p]),MPI_DOUBLE,
2117 MPI_Type_commit(&datatype_values);
2118 datatypes.push_back(datatype_values);
2121 MPI_Datatype datatype_column_index;
2122 MPI_Type_contiguous(
int(nnz_recv[Color][p]),MPI_INT,
2123 &datatype_column_index);
2124 MPI_Type_commit(&datatype_column_index);
2125 datatypes.push_back(datatype_column_index);
2128 MPI_Datatype datatype_row_start;
2130 &datatype_row_start);
2131 MPI_Type_commit(&datatype_row_start);
2132 datatypes.push_back(datatype_row_start);
2135 MPI_Datatype typelist[3];
2136 typelist[0] = datatype_values;
2137 typelist[1] = datatype_column_index;
2138 typelist[2] = datatype_row_start;
2146 while (nnz_start_proc[k] != p)
2150 int offset_nnz = nnz_start_index[k];
2153 MPI_Aint displacements[3];
2154 MPI_Get_address(values_recv + offset_nnz,&displacements[0]);
2155 MPI_Get_address(column_index_recv + offset_nnz,&displacements[1]);
2156 MPI_Get_address(row_start_recv + offset_n,&displacements[2]);
2157 for (
int j = 2; j >= 0; j--)
2159 displacements[j] -= displacements[0];
2163 int block_length[3];
2164 block_length[0] = block_length[1] = block_length[2] = 1;
2167 MPI_Datatype recv_type;
2168 MPI_Type_create_struct(3,block_length,displacements,typelist,
2170 MPI_Type_commit(&recv_type);
2171 datatypes.push_back(recv_type);
2176 recv_req.push_back(tr1);
2177 MPI_Irecv(values_recv + offset_nnz,1,recv_type,
2188 unsigned c_send = 0;
2189 Vector<MPI_Request> send_req;
2193 for (
unsigned i = 0; i <
Nprec; i++)
2197 double* values_send = matrix_pt[
i]->value();
2198 int* row_start_send = matrix_pt[
i]->row_start();
2199 int* column_index_send = matrix_pt[
i]->column_index();
2202 for (
unsigned p = 0; p < nproc; p++)
2210 if (nnz_send[i][p] != 0)
2219 MPI_Datatype datatype_values;
2220 MPI_Type_contiguous(
int(nnz_send[i][p]),MPI_DOUBLE,
2222 MPI_Type_commit(&datatype_values);
2223 datatypes.push_back(datatype_values);
2226 MPI_Datatype datatype_column_index;
2227 MPI_Type_contiguous(
int(nnz_send[i][p]),MPI_INT,
2228 &datatype_column_index);
2229 MPI_Type_commit(&datatype_column_index);
2230 datatypes.push_back(datatype_column_index);
2233 MPI_Datatype datatype_row_start;
2235 &datatype_row_start);
2236 MPI_Type_commit(&datatype_row_start);
2237 datatypes.push_back(datatype_row_start);
2240 MPI_Datatype typelist[3];
2241 typelist[0] = datatype_values;
2242 typelist[1] = datatype_column_index;
2243 typelist[2] = datatype_row_start;
2250 int offset_nnz = row_start_send[offset_n];
2253 MPI_Aint displacements[3];
2254 MPI_Get_address(values_send + offset_nnz,&displacements[0]);
2255 MPI_Get_address(column_index_send + offset_nnz,&displacements[1]);
2256 MPI_Get_address(row_start_send + offset_n,&displacements[2]);
2257 for (
int j = 2; j >= 0; j--)
2259 displacements[j] -= displacements[0];
2263 int block_length[3];
2264 block_length[0] = block_length[1] = block_length[2] = 1;
2267 MPI_Datatype send_type;
2268 MPI_Type_create_struct(3,block_length,displacements,typelist,
2270 MPI_Type_commit(&send_type);
2271 datatypes.push_back(send_type);
2276 send_req.push_back(tr1);
2277 MPI_Isend(values_send + offset_nnz,1,send_type,
2287 if (nnz_recv[Color][my_rank] != 0)
2291 double* values_send = matrix_pt[
Color]->value();
2292 int* row_start_send = matrix_pt[
Color]->row_start();
2293 int* column_index_send = matrix_pt[
Color]->column_index();
2296 unsigned offset_n_send =
2300 unsigned offset_nnz_send = row_start_send[offset_n_send];
2303 unsigned offset_n_recv =
2308 while (nnz_start_proc[k] != my_rank)
2312 unsigned offset_nnz_recv = nnz_start_index[k];
2317 unsigned n_nnz = nnz_send[
Color][my_rank];
2318 for (
unsigned j = 0; j < n_nnz; j++)
2320 values_recv[offset_nnz_recv + j] =
2321 values_send[offset_nnz_send + j];
2322 column_index_recv[offset_nnz_recv + j] =
2323 column_index_send[offset_nnz_send + j];
2328 for (
unsigned j = 0; j < n_n; j++)
2330 row_start_recv[offset_n_recv + j] =
2331 row_start_send[offset_n_send + j];
2336 LinearAlgebraDistribution* temp_dist_pt =
2337 new LinearAlgebraDistribution
2339 target_nrow_local[Color][my_rank]);
2342 local_matrix_pt =
new CRDoubleMatrix(temp_dist_pt);
2343 delete temp_dist_pt;
2350 Vector<MPI_Status> recv_stat(c_recv);
2351 MPI_Waitall(c_recv,&recv_req[0],&recv_stat[0]);
2359 unsigned nproc_contrib = nnz_start_index.size();
2360 for (
unsigned j = 0; j < nproc_contrib; j++)
2363 target_first_row[
Color][my_rank];
2365 unsigned nnz_inc = nnz_start_index[j]-row_start_recv[first];
2366 for (
unsigned k = first; k < last; k++)
2368 row_start_recv[k]+=nnz_inc;
2371 row_start_recv[target_nrow_local[
Color][my_rank]] = int(nnz_total);
2374 local_matrix_pt->build_without_copy(matrix_pt[Color]->ncol(),
2383 Vector<MPI_Status> send_stat(c_recv);
2384 MPI_Waitall(c_send,&send_req[0],&send_stat[0]);
2390 unsigned ndatatypes = datatypes.size();
2391 for (
unsigned i = 0; i < ndatatypes; i++)
2393 MPI_Type_free(&datatypes[i]);
2402 if (matrix_pt[0]->distributed())
2404 delete local_matrix_pt;
2408 for (
unsigned i = 0; i <
Nprec; i++)
2422 Vector<DoubleVector> &z)
2428 std::ostringstream error_message;
2429 error_message <<
"The preconditioners have not been setup.";
2430 throw OomphLibError(error_message.str(),
2431 OOMPH_CURRENT_FUNCTION,
2432 OOMPH_EXCEPTION_LOCATION);
2436 if (r.size() !=
Nprec)
2438 std::ostringstream error_message;
2439 error_message <<
"This PreconditionerArray has " << Nprec
2440 <<
" preconditioners but r only contains " 2441 << r.size() <<
" preconditioners.";
2442 throw OomphLibError(error_message.str(),
2443 OOMPH_CURRENT_FUNCTION,
2444 OOMPH_EXCEPTION_LOCATION);
2448 if (z.size() !=
Nprec)
2450 std::ostringstream error_message;
2451 error_message <<
"This PreconditionerArray has " << Nprec
2452 <<
" preconditioners but z only contains " 2453 << z.size() <<
" preconditioners.";
2454 throw OomphLibError(error_message.str(),
2455 OOMPH_CURRENT_FUNCTION,
2456 OOMPH_EXCEPTION_LOCATION);
2460 for (
unsigned i = 0; i <
Nprec; i++)
2462 if (*r[i].distribution_pt() != *Distribution_pt[
i])
2464 std::ostringstream error_message;
2465 error_message <<
"The distribution of r[" << i <<
"] does not have the" 2466 <<
" the same distribution as the matrix_pt[" << i
2467 <<
"] that was passed to setup_preconditioners(...)";
2468 throw OomphLibError(error_message.str(),
2469 OOMPH_CURRENT_FUNCTION,
2470 OOMPH_EXCEPTION_LOCATION);
2485 Vector<MPI_Request> send_reqs;
2486 Vector<MPI_Request> recv_reqs;
2492 double* local_r_values = local_r.values_pt();
2498 for (
unsigned i = 0; i <
Nprec; i++)
2501 if (r[i].distributed())
2505 unsigned current_first_row = r[
i].first_row();
2508 for (
unsigned p = 0; p < nproc; p++)
2526 MPI_Isend(const_cast<double*>(r[i].values_pt())+offset_n,
2529 send_reqs.push_back(tr);
2543 MPI_Irecv(local_r_values + offset_n,
2546 recv_reqs.push_back(tr);
2555 if (!r[Color].distributed())
2558 const double* r_pt = r[
Color].values_pt();
2559 unsigned nrow_local = local_r.nrow_local();
2560 for (
unsigned i = 0; i < nrow_local; i++)
2562 local_r_values[
i] = r_pt[
i];
2568 const double* r_pt = r[
Color].values_pt();
2571 unsigned current_first_row = r[
Color].first_row();
2580 unsigned offset_n_send =
2584 unsigned offset_n_recv =
2589 for (
unsigned j = 0; j < n_n; j++)
2591 local_r_values[offset_n_recv + j] = r_pt[offset_n_send + j];
2597 unsigned n_recv = recv_reqs.size();
2600 MPI_Waitall(n_recv,&recv_reqs[0],MPI_STATUS_IGNORE);
2606 DoubleVector local_z;
2611 double* local_z_values = local_z.values_pt();
2614 for (
unsigned i = 0; i <
Nprec; i++)
2620 z[
i].build(r[i].distribution_pt(),0.0);
2625 for (
unsigned i = 0; i <
Nprec; i++)
2627 if (r[i].distributed())
2631 unsigned current_first_row = r[
i].first_row();
2634 for (
unsigned p = 0; p < nproc; p++)
2652 MPI_Irecv(z[i].values_pt() + offset_n,
2655 recv_reqs.push_back(tr);
2669 MPI_Isend(local_z_values + offset_n,
2672 send_reqs.push_back(tr);
2693 int p = j + First_proc_for_prec[
i];
2695 MPI_Isend(local_z_values,z[Color].nrow(),MPI_DOUBLE,p,0,
2697 send_reqs.push_back(tr);
2701 int p = my_local_rank;
2702 while ((p -
int(Nproc_for_prec[i])) >= 0)
2704 p-= Nproc_for_prec[
i];
2706 p += First_proc_for_prec[
i];
2710 MPI_Irecv(z[i].values_pt(),z[i].nrow(),MPI_DOUBLE,p,0,
2712 recv_reqs.push_back(tr);
2718 if (!r[Color].distributed())
2721 double* z_pt = z[
Color].values_pt();
2722 unsigned nrow_local = local_z.nrow_local();
2723 for (
unsigned i = 0; i < nrow_local; i++)
2725 z_pt[
i] = local_z_values[
i];
2731 double* z_pt = z[
Color].values_pt();
2734 unsigned current_first_row = r[
Color].first_row();
2743 unsigned offset_n_send =
2747 unsigned offset_n_recv =
2752 for (
unsigned j = 0; j < n_n; j++)
2754 z_pt[offset_n_send + j] =
2755 local_z_values[offset_n_recv + j];
2762 n_recv = recv_reqs.size();
2765 MPI_Waitall(n_recv,&recv_reqs[0],MPI_STATUS_IGNORE);
2770 unsigned n_send = send_reqs.size();
2773 MPI_Waitall(n_send,&send_reqs[0],MPI_STATUS_IGNORE);
unsigned Method
the communication method in the setup_preconditioners(...) method
virtual void preconditioner_solve(const DoubleVector &r, DoubleVector &z)=0
Apply the preconditioner. Pure virtual generic interface function. This method should apply the preconditioner operator to the vector r and return the vector z.
Vector< unsigned > Nproc_for_prec
The nrow_local component of the distribution of the processors over the preconditioners.
unsigned Color
the Color of this processor (or the preconditioner number)
int compute_tag(const int &nproc, const int &source, const int &dest, const int &type)
helper method for computing the MPI_Isend and MPI_Irecv tags
Preconditioner * Preconditioner_pt
The pointer to the local preconditioner on this processor.
void setup_preconditioners(Vector< CRDoubleMatrix *> matrix_pt, Vector< Preconditioner *> prec_pt, const OomphCommunicator *comm_pt)
Vector< Vector< unsigned > > First_row_for_proc
Storage (indexed [i][j]) for the first row that will be sent from this processor to processor j for preconditioner i.
OomphCommunicator * Local_communicator_pt
Vector of communicators for the preconditioners.
Vector< LinearAlgebraDistribution * > Distribution_pt
Vector< unsigned > First_proc_for_prec
The first_row component of the distribution of the processors over the preconditioners.
OomphCommunicator * Global_communicator_pt
pointer to the global communicator for this preconditioner array
unsigned first_row() const
access function for the first row on this processor
void clean_up_memory()
Clean up memory.
void setup(DoubleMatrixBase *matrix_pt)
Setup the preconditioner: store the matrix pointer and the communicator pointer then call the preconditioner-specific setup function.
Vector< Vector< unsigned > > Nrow_local_for_proc
Storage (indexed [i][j]) for the nrow_local that will be sent from this processor to processor j for preconditioner i.
Vector< Vector< unsigned > > First_row_from_proc
Storage (indexed [i][j]) for the first row that will be received by this processor from processor j for preconditioner i.
Vector< Vector< unsigned > > Nrow_local_from_proc
Storage (indexed [i][j]) for the nrow_local that will be received by this processor from processor j for preconditioner i.
void solve_preconditioners(const Vector< DoubleVector > &r, Vector< DoubleVector > &z)
Applies each preconditioner to the corresponding vector in r and z.
unsigned Nprec
the number of preconditioners in the array
LinearAlgebraDistribution * distribution_pt() const
access to the LinearAlgebraDistribution