std::ostringstream error_message;
error_message << "The solid mesh pointer must be set.\n"
              << "Use method set_solid_mesh(...)";
throw OomphLibError(error_message.str(),
                    OOMPH_CURRENT_FUNCTION,
                    OOMPH_EXCEPTION_LOCATION);
// The last dof type (the pressure) goes into block 1; all other dof types
// remain in block 0.
dof_to_block_map[ndof_types-1] = 1;
// ...
double block_setup_time = t_block_finish - t_block_start;
// ...
oomph_info << "Time for block_setup(...) [sec]: "
           << block_setup_time << "\n";
// ...
if (F_block_preconditioner_pt == 0)
 {
  F_preconditioner_is_block_preconditioner = false;
 }
// ...
double get_B_time = t_get_B_finish - t_get_B_start;
// ...
           << get_B_time << "\n";
// ...
ivmm_assembly_time = ivmm_assembly_finish_t - ivmm_assembly_start_t;
oomph_info << "Time to assemble inverse mass matrix [sec]: "
           << ivmm_assembly_time << "\n";
// Scale the divergence matrix: B -> B*Q^{-1} (Q = diagonal mass matrix).
b_pt->multiply(*ivmm_pt, *temp_matrix_pt);
delete b_pt; b_pt = 0;
b_pt = temp_matrix_pt;
// ...
double t_BQ_time = t_BQ_finish - t_BQ_start;
// ...
           << t_BQ_time << std::endl;
// ...
double t_get_Bt_time = t_get_Bt_finish - t_get_Bt_start;
// ...
           << t_get_Bt_time << std::endl;
// ...
// Form P = (B*Q^{-1})*Bt.
b_pt->multiply(*bt_pt, *p_matrix_pt);
// ...
double t_P_time = t_P_finish - t_P_start;
oomph_info << "Time to generate P matrix [sec]: "
           << t_P_time << std::endl;
// ...
// Scale the gradient matrix: Bt -> Q^{-1}*Bt.
ivmm_pt->multiply(*bt_pt, *temp_matrix_pt);
delete bt_pt; bt_pt = 0;
bt_pt = temp_matrix_pt;
// ...
double t_QBt_time = t_QBt_finish - t_QBt_start;
// ...
           << t_QBt_time << std::endl;
// ...
double t_get_F_time = t_get_F_finish - t_get_F_start;
// ...
           << t_get_F_time << std::endl;
// ...
// Auxiliary product: aux = F*(Q^{-1}*Bt).
f_pt->multiply(*bt_pt, *aux_matrix_pt);
// ...
double t_aux_time = t_aux_matrix_finish - t_aux_matrix_start;
// ...
           << t_aux_time << std::endl;
// ...
if (F_preconditioner_is_block_preconditioner)
// ...
// Form E = (B*Q^{-1})*(F*(Q^{-1}*Bt)).
b_pt->multiply(*aux_matrix_pt, *e_matrix_pt);
delete aux_matrix_pt;
// ...
double t_E_time = t_E_matrix_finish - t_E_matrix_start;
oomph_info << "Time to generate E (B*(F*Bt)) [sec]: "
           << t_E_time << std::endl;
// ...
double t_E_time = t_E_matvec_finish - t_E_matvec_start;
oomph_info << "Time to build E (BFBt) matrix vector operator E [sec]: "
           << t_E_time << std::endl;
double t_get_Bt_time = t_get_Bt_finish - t_get_Bt_start;
// ...
           << t_get_Bt_time << std::endl;
// ...
ivmm_assembly_time = ivmm_assembly_finish_t - ivmm_assembly_start_t;
oomph_info << "Time to assemble Q (inverse diagonal "
           << "mass matrix) [sec]: "
           << ivmm_assembly_time << "\n";
// ...
double t_get_Bt_time = t_get_Bt_finish - t_get_Bt_start;
// ...
           << t_get_Bt_time << std::endl;
// ...
delete bt_pt; bt_pt = 0;
// ...
double t_QBt_time = t_QBt_matrix_finish - t_QBt_matrix_start;
// ...
           << t_QBt_time << std::endl;
// ...
// Form the pressure matrix P.
b_pt->multiply(*bt_pt, *p_matrix_pt);
// ...
double t_p_time = t_p_matrix_finish - t_p_matrix_start;
// ...
           << t_p_time << std::endl;
// ...
delete b_pt; b_pt = 0;
// ...
double t_p_time = t_QBt_MV_finish - t_QBt_MV_start;
oomph_info << "Time to build QBt matrix vector operator [sec]: "
           << t_p_time << std::endl;
// ...
delete bt_pt; bt_pt = 0;
// ...
double t_get_F_time = t_get_F_finish - t_get_F_start;
// ...
           << t_get_F_time << std::endl;
// ...
double t_F_MV_time = t_F_MV_finish - t_F_MV_start;
oomph_info << "Time to build F Matrix Vector Operator [sec]: "
           << t_F_MV_time << std::endl;
if (F_preconditioner_is_block_preconditioner)
 {
  delete f_pt; f_pt = 0;
 }
// ...
double t_get_Bt_time = t_get_Bt_finish - t_get_Bt_start;
// ...
           << t_get_Bt_time << std::endl;
// ...
double t_Bt_MV_time = t_Bt_MV_finish - t_Bt_MV_start;
oomph_info << "Time to build Bt Matrix Vector Operator [sec]: "
           << t_Bt_MV_time << std::endl;
// ...
delete bt_pt; bt_pt = 0;
// ...
delete p_matrix_pt; p_matrix_pt = 0;
// ...
double t_p_prec_time = t_p_prec_finish - t_p_prec_start;
oomph_info << "P sub-preconditioner setup time [sec]: "
           << t_p_prec_time << "\n";
// ...
if (F_preconditioner_is_block_preconditioner)
 {
  // ...
  F_block_preconditioner_pt->
   // ...
 }
// ...
delete f_pt; f_pt = 0;
// ...
double t_f_prec_time = t_f_prec_finish - t_f_prec_start;
oomph_info << "F sub-preconditioner setup time [sec]: "
           << t_f_prec_time << "\n";
std::ostringstream error_message;
error_message << "setup must be called before using preconditioner_solve";
throw OomphLibError(error_message.str(),
                    OOMPH_CURRENT_FUNCTION,
                    OOMPH_EXCEPTION_LOCATION);
// ...
std::ostringstream error_message;
error_message << "The vectors z and r must have the same number of "
// ...
throw OomphLibError(error_message.str(),
                    OOMPH_CURRENT_FUNCTION,
                    OOMPH_EXCEPTION_LOCATION);
// ...
oomph_info << "LSC prec solve: Time for get block vector: "
           << t_end - t_start << std::endl;
// ...
std::ostringstream error_message;
error_message << "P_preconditioner_pt has not been set.";
throw OomphLibError(error_message.str(),
                    OOMPH_CURRENT_FUNCTION,
                    OOMPH_EXCEPTION_LOCATION);
// ...
oomph_info << "LSC prec solve: First P solve [nrow="
           // ...
           << t_end - t_start << std::endl;
// ...
another_temp_vec.clear();
// ...
oomph_info << "LSC prec solve: E matrix vector product: "
           << t_end - t_start << std::endl;
// ...
another_temp_vec.clear();
// ...
oomph_info << "LSC prec solve: Second P solve [nrow="
           // ...
           << t_end - t_start << std::endl;
// ...
temp_vec -= another_temp_vec;
// ...
oomph_info << "LSC prec solve: G matrix vector product: "
           << t_end - t_start << std::endl;
// ...
another_temp_vec.clear();
// ...
another_temp_vec += temp_vec;
// ...
std::ostringstream error_message;
error_message << "F_preconditioner_pt has not been set.";
throw OomphLibError(error_message.str(),
                    OOMPH_CURRENT_FUNCTION,
                    OOMPH_EXCEPTION_LOCATION);
// ...
oomph_info << "LSC prec solve: F solve [nrow="
           // ...
           << t_end - t_start << std::endl;
// ...
           << t_end - t_start_overall << std::endl;
unsigned nproc = this->comm_pt()->nproc();
// ...
unsigned my_rank = this->comm_pt()->my_rank();
// ...
unsigned first_lookup_row = 0;
unsigned last_lookup_row = 0;
// ...
last_lookup_row = first_lookup_row +
 // ...

// Loop over the elements and classify each diagonal mass-matrix contribution
// by the processor that holds its row.
for (unsigned e = 0; e < n_el; e++)
// ...
cast_el_pt = dynamic_cast<
// ...
std::ostringstream error_message;
// ...
 << "SolidElementWithDiagonalMassMatrix*\n"
 << "Element is of type: "
// ...
 "PressureBasedSolidLSCPreconditioner::assemble_mass_matrix_diagonal()",
 OOMPH_EXCEPTION_LOCATION);
// ...
for (unsigned i = 0; i < el_dof; i++)
// ...
if (eqn_number >= first_lookup_row &&
    eqn_number <= last_lookup_row)
// ...
for (unsigned p = 0; p < nproc; p++)
// ...
if (index >= displ_dist_pt->first_row(p) &&
 // ...
// Row is held locally: add the contribution straight into m_values ...
m_values[index - first_row] += el_vmm_diagonal[i];
// ...
// ... otherwise queue it for sending to processor p.
classified_contributions_send[p].push_back(el_vmm_diagonal[i]);
classified_indices_send[p].push_back(index);
// ...
// Contributions that cannot be classified locally are recorded against the
// processor that owns the global equation in the master distribution.
while (!(eqn_number >= master_distribution_pt->first_row(p) &&
         (eqn_number < (master_distribution_pt->first_row(p)
          // ...
unclassified_contributions_send[p].push_back(el_vmm_diagonal[i]);
unclassified_indices_send[p].push_back(eqn_number);
// Count the unclassified contributions destined for each processor ...
unsigned* n_unclassified_send = new unsigned[nproc];
for (unsigned p = 0; p < nproc; p++)
// ...
n_unclassified_send[p] = 0;
// ...
n_unclassified_send[p] = unclassified_contributions_send[p].size();
// ...
// ... and exchange the counts.
unsigned* n_unclassified_recv = new unsigned[nproc];
MPI_Alltoall(n_unclassified_send, 1, MPI_UNSIGNED,
             n_unclassified_recv, 1, MPI_UNSIGNED,
             // ...
// Base address used for the displacements of the derived MPI datatypes.
MPI_Aint base_displacement;
MPI_Get_address(m_values, &base_displacement);
// ...
for (unsigned p = 0; p < nproc; p++)
// ...
if (n_unclassified_recv[p] > 0)
// ...
unclassified_contributions_recv[p] = new double[n_unclassified_recv[p]];
unclassified_indices_recv[p] = new unsigned[n_unclassified_recv[p]];
// ...
// Build a struct datatype (values + indices) and post the non-blocking receive.
MPI_Datatype recv_types[2];
MPI_Aint recv_displacements[2];
// ...
MPI_Type_contiguous(n_unclassified_recv[p], MPI_DOUBLE,
                    // ...
MPI_Type_commit(&recv_types[0]);
MPI_Get_address(unclassified_contributions_recv[p],
                &recv_displacements[0]);
recv_displacements[0] -= base_displacement;
// ...
MPI_Type_contiguous(n_unclassified_recv[p], MPI_UNSIGNED,
                    // ...
MPI_Type_commit(&recv_types[1]);
MPI_Get_address(unclassified_indices_recv[p],
                &recv_displacements[1]);
recv_displacements[1] -= base_displacement;
// ...
MPI_Datatype final_recv_type;
MPI_Type_create_struct(2, recv_sz, recv_displacements, recv_types,
                       // ...
MPI_Type_commit(&final_recv_type);
// ...
MPI_Irecv(m_values, 1, final_recv_type, p, 0,
          // ...
unclassified_recv_requests.push_back(req);
unclassified_recv_proc.push_back(p);
MPI_Type_free(&recv_types[0]);
MPI_Type_free(&recv_types[1]);
MPI_Type_free(&final_recv_type);
// ...
if (n_unclassified_send[p] > 0)
// ...
// Matching struct datatype for the non-blocking send.
MPI_Datatype send_types[2];
MPI_Aint send_displacements[2];
// ...
MPI_Type_contiguous(n_unclassified_send[p], MPI_DOUBLE,
                    // ...
MPI_Type_commit(&send_types[0]);
MPI_Get_address(&unclassified_contributions_send[p][0],
                &send_displacements[0]);
send_displacements[0] -= base_displacement;
// ...
MPI_Type_contiguous(n_unclassified_send[p], MPI_UNSIGNED,
                    // ...
MPI_Type_commit(&send_types[1]);
MPI_Get_address(&unclassified_indices_send[p][0],
                &send_displacements[1]);
send_displacements[1] -= base_displacement;
// ...
MPI_Datatype final_send_type;
MPI_Type_create_struct(2, send_sz, send_displacements, send_types,
                       // ...
MPI_Type_commit(&final_send_type);
// ...
MPI_Isend(m_values, 1, final_send_type, p, 0,
          // ...
unclassified_send_requests.push_back(req);
MPI_Type_free(&send_types[0]);
MPI_Type_free(&send_types[1]);
MPI_Type_free(&final_send_type);
// Process the unclassified contributions as they arrive.
unsigned n_unclassified_recv_req = unclassified_recv_requests.size();
while (n_unclassified_recv_req > 0)
// ...
MPI_Waitany(n_unclassified_recv_req, &unclassified_recv_requests[0],
            &req_num, MPI_STATUS_IGNORE);
unsigned p = unclassified_recv_proc[req_num];
unclassified_recv_requests.erase(unclassified_recv_requests.begin()
                                 // ...
unclassified_recv_proc.erase(unclassified_recv_proc.begin() + req_num);
n_unclassified_recv_req--;
// ...
unsigned n_recv = n_unclassified_recv[p];
for (unsigned i = 0; i < n_recv; i++)
// ...
unsigned eqn_number = unclassified_indices_recv[p][i];
// ...
for (unsigned pp = 0; pp < nproc; pp++)
// ...
if (index >= displ_dist_pt->first_row(pp) &&
 // ...
// Local row: add the received contribution into m_values
// ...
 += unclassified_contributions_recv[p][i];
// ...
// ... otherwise reclassify it for forwarding to processor pp.
double v = unclassified_contributions_recv[p][i];
classified_contributions_send[pp].push_back(v);
classified_indices_send[pp].push_back(index);
// ...
delete[] unclassified_contributions_recv[p];
delete[] unclassified_indices_recv[p];
// ...
delete[] n_unclassified_recv;
// Count and exchange the classified contributions in the same way.
unsigned* n_classified_send = new unsigned[nproc];
for (unsigned p = 0; p < nproc; p++)
// ...
n_classified_send[p] = 0;
// ...
n_classified_send[p] = classified_contributions_send[p].size();
// ...
unsigned* n_classified_recv = new unsigned[nproc];
MPI_Alltoall(n_classified_send, 1, MPI_UNSIGNED,
             n_classified_recv, 1, MPI_UNSIGNED,
             // ...
// ...
for (unsigned p = 0; p < nproc; p++)
// ...
if (n_classified_recv[p] > 0)
// ...
classified_contributions_recv[p] = new double[n_classified_recv[p]];
classified_indices_recv[p] = new unsigned[n_classified_recv[p]];
// ...
// Struct datatype (values + indices) for the non-blocking receive.
MPI_Datatype recv_types[2];
MPI_Aint recv_displacements[2];
// ...
MPI_Type_contiguous(n_classified_recv[p], MPI_DOUBLE,
                    // ...
MPI_Type_commit(&recv_types[0]);
MPI_Get_address(classified_contributions_recv[p],
                &recv_displacements[0]);
recv_displacements[0] -= base_displacement;
// ...
MPI_Type_contiguous(n_classified_recv[p], MPI_UNSIGNED,
                    // ...
MPI_Type_commit(&recv_types[1]);
MPI_Get_address(classified_indices_recv[p],
                &recv_displacements[1]);
recv_displacements[1] -= base_displacement;
// ...
MPI_Datatype final_recv_type;
MPI_Type_create_struct(2, recv_sz, recv_displacements, recv_types,
                       // ...
MPI_Type_commit(&final_recv_type);
// ...
MPI_Irecv(m_values, 1, final_recv_type, p, 0,
          // ...
classified_recv_requests.push_back(req);
classified_recv_proc.push_back(p);
MPI_Type_free(&recv_types[0]);
MPI_Type_free(&recv_types[1]);
MPI_Type_free(&final_recv_type);
// ...
if (n_classified_send[p] > 0)
// ...
// Matching struct datatype for the non-blocking send.
MPI_Datatype send_types[2];
MPI_Aint send_displacements[2];
// ...
MPI_Type_contiguous(n_classified_send[p], MPI_DOUBLE,
                    // ...
MPI_Type_commit(&send_types[0]);
MPI_Get_address(&classified_contributions_send[p][0],
                &send_displacements[0]);
send_displacements[0] -= base_displacement;
// ...
MPI_Type_contiguous(n_classified_send[p], MPI_UNSIGNED,
                    // ...
MPI_Type_commit(&send_types[1]);
MPI_Get_address(&classified_indices_send[p][0],
                &send_displacements[1]);
send_displacements[1] -= base_displacement;
// ...
MPI_Datatype final_send_type;
MPI_Type_create_struct(2, send_sz, send_displacements, send_types,
                       // ...
MPI_Type_commit(&final_send_type);
// ...
MPI_Isend(m_values, 1, final_send_type, p, 0,
          // ...
classified_send_requests.push_back(req);
MPI_Type_free(&send_types[0]);
MPI_Type_free(&send_types[1]);
MPI_Type_free(&final_send_type);
// Receive the classified contributions and add them into m_values.
unsigned n_classified_recv_req = classified_recv_requests.size();
while (n_classified_recv_req > 0)
// ...
MPI_Waitany(n_classified_recv_req, &classified_recv_requests[0],
            &req_num, MPI_STATUS_IGNORE);
unsigned p = classified_recv_proc[req_num];
classified_recv_requests.erase(classified_recv_requests.begin()
                               // ...
classified_recv_proc.erase(classified_recv_proc.begin() + req_num);
n_classified_recv_req--;
// ...
unsigned n_recv = n_classified_recv[p];
for (unsigned i = 0; i < n_recv; i++)
// ...
m_values[classified_indices_recv[p][i] - first_row]
 += classified_contributions_recv[p][i];
// ...
delete[] classified_contributions_recv[p];
delete[] classified_indices_recv[p];
// ...
// Wait for the outstanding sends to complete and clean up.
unsigned n_unclassified_send_req = unclassified_send_requests.size();
if (n_unclassified_send_req > 0)
// ...
MPI_Waitall(n_unclassified_send_req, &unclassified_send_requests[0],
            // ...
delete[] unclassified_contributions_send;
delete[] unclassified_indices_send;
delete[] n_unclassified_send;
// ...
unsigned n_classified_send_req = classified_send_requests.size();
if (n_classified_send_req > 0)
// ...
MPI_Waitall(n_classified_send_req, &classified_send_requests[0],
            // ...
delete[] classified_indices_send;
delete[] classified_contributions_send;
delete[] n_classified_recv;
delete[] n_classified_send;
// (Non-distributed case) Loop over the elements and add each contribution
// directly into the local values.
for (unsigned e = 0; e < n_el; e++)
// ...
cast_el_pt = dynamic_cast<
// ...
std::ostringstream error_message;
// ...
 << "Failed cast to "
 << "SolidElementWithDiagonalMassMatrix*\n"
 << "Element is of type: "
// ...
 error_message.str(),
 "PressureBasedSolidLSCPreconditioner::assemble_mass_matrix_diagonal()",
 OOMPH_EXCEPTION_LOCATION);
// ...
for (unsigned i = 0; i < el_dof; i++)
// ...
if (index >= first_row &&
    index < first_row + nrow_local)
// ...
m_values[index - first_row] += el_vmm_diagonal[i];
// ...
// Invert the assembled diagonal and build the sparsity pattern of the
// (diagonal) CRDoubleMatrix representation of Q^{-1}.
int* m_row_start = new int[nrow_local + 1];
// ...
m_values[i] = 1 / m_values[i];
m_column_index[i] = first_row + i;
bool Form_BFBt_product
Indicates whether BFBt should be formed explicitly or whether the component matrices should be retained. If true, then in setup(...) BFBt is computed, and in preconditioner_solve(...) a single matrix-vector product with BFBt is performed. If false, then in setup(...) the matrices B and F are assembled and stored, and in preconditioner_solve(...) a sequence of matrix-vector products with B, F and Bt is performed. (Note: this description ignores scaling; with scaling, B and Bt are replaced by BQ and QBt, as sketched below.)
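For orientation, a sketch of the operators this flag chooses between; this is a reading of the setup() listing above rather than text from the source, Q denotes the diagonal mass matrix assembled by assemble_mass_matrix_diagonal(), and sign conventions are suppressed:

  \[
    P \;=\; B\,Q^{-1}B^{T}, \qquad
    E \;=\; B\,Q^{-1}F\,Q^{-1}B^{T}, \qquad
    \widehat{S}^{-1} \;\approx\; P^{-1}E\,P^{-1}.
  \]

With Form_BFBt_product true, E is assembled once and applied as a single matrix-vector product; otherwise the factors are kept as separate MatrixVectorProduct operators (QBt_mat_vec_pt, F_mat_vec_pt, Bt_mat_vec_pt) and applied in sequence during each solve.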
virtual void get_mass_matrix_diagonal(Vector< double > &mass_diag)=0
Get the diagonal of whatever represents the mass matrix in the specific preconditionable element...
void get_block_vector(const unsigned &n, const DoubleVector &v, DoubleVector &b) const
Takes the naturally ordered vector, v and returns the n-th block vector, b. Here n is the block numbe...
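A minimal usage sketch; the block number 1 and the vector names r, z and temp_vec are illustrative assumptions, not taken from the source:

  DoubleVector temp_vec;
  this->get_block_vector(1, r, temp_vec);    // pull out block 1 of r
  // ... operate on temp_vec ...
  this->return_block_vector(1, temp_vec, z); // scatter the result back into z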
bool any_mesh_distributed() const
Check if any of the meshes are distributed. This is equivalent to problem.distributed() and is used a...
int index_in_block(const unsigned &i_dof) const
Given a global dof number, returns the index in the block it belongs to. This is the overall index...
Preconditioner * P_preconditioner_pt
Pointer to the 'preconditioner' for the pressure matrix.
virtual void preconditioner_solve(const DoubleVector &r, DoubleVector &z)=0
Apply the preconditioner. Pure virtual generic interface function. This method should apply the preco...
void clean_up_memory()
Helper function to delete preconditioner data.
void clear()
wipes the DoubleVector
MatrixVectorProduct * Bt_mat_vec_pt
MatrixVectorProduct operator for Bt;.
void multiply(const DoubleVector &x, DoubleVector &soln) const
Multiply the matrix by the vector x: soln=Ax.
void multiply_transpose(const DoubleVector &x, DoubleVector &y) const
Apply the transpose of the operator to the vector x and return the result in the vector y...
bool is_halo() const
Is this element a halo?
unsigned nrow() const
access function to the number of global rows.
unsigned first_row() const
access function for the first row on this processor. If not distributed then this is just zero...
CRDoubleMatrix * assemble_mass_matrix_diagonal()
bool P_matrix_using_scaling
Control flag is true if mass matrix diagonal scaling is used in the Schur complement approximation...
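Concretely, with Q as in the sketch above, the two choices amount to (an interpretation of this flag, not text from the source):

  \[
    P = B\,B^{T} \ \text{(no scaling)}, \qquad
    P = B\,Q^{-1}B^{T} \ \text{(with scaling)}.
  \]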
void setup_matrix_vector_product(MatrixVectorProduct *matvec_prod_pt, CRDoubleMatrix *block_pt, const Vector< unsigned > &block_col_indices)
Set up a matrix-vector product: matvec_prod_pt is a pointer to the MatrixVectorProduct, block_pt is a pointer to the block matrix, and block_col_indices is a vector of the block (column) indices of the vector that the matrix is to be multiplied by.
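A hedged sketch of how such an operator might be set up for the Bt block; the names bt_mat_vec_pt and bt_pt and the use of block column 1 are assumptions:

  MatrixVectorProduct* bt_mat_vec_pt = new MatrixVectorProduct;
  Vector<unsigned> block_cols(1, 1); // operand vector lives in block column 1
  this->setup_matrix_vector_product(bt_mat_vec_pt, bt_pt, block_cols);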
unsigned ndof_types() const
Return the total number of DOF types.
void preconditioner_solve(const DoubleVector &r, DoubleVector &z)
Apply preconditioner to Vector r.
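Read together with the timed steps in the listing above, the application amounts to the following, up to the sign convention used in the code, with r = (r_u, r_p) and z = (z_u, z_p):

  \[
    z_p = -P^{-1}E\,P^{-1} r_p, \qquad
    z_u = \tilde{F}^{-1}\!\left(r_u - B^{T} z_p\right),
  \]

where P^{-1} and \tilde{F}^{-1} are applied by the P and F sub-preconditioners and B^{T} by the Bt matrix-vector operator (the "G matrix vector product" step).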
MatrixVectorProduct * F_mat_vec_pt
MatrixVectorProduct operator for F if BFBt is not to be formed.
bool is_subsidiary_block_preconditioner() const
Return true if this preconditioner is a subsidiary preconditioner.
const LinearAlgebraDistribution * master_distribution_pt() const
Access function to the distribution of the master preconditioner. If this preconditioner does not hav...
const LinearAlgebraDistribution * block_distribution_pt(const unsigned &b) const
Access function to the block distributions (const version).
unsigned long nelement() const
Return number of elements in the mesh.
MatrixVectorProduct * QBt_mat_vec_pt
MatrixVectorProduct operator for QBt if BFBt is not to be formed.
bool distributed() const
distribution is serial or distributed
unsigned ndof() const
Return the number of equations/dofs in the element.
Describes the distribution of a distributable linear algebra type object. Typically this is a contain...
bool Using_default_f_preconditioner
flag indicating whether the default F preconditioner is used
void build(const DoubleVector &old_vector)
Just copies the argument DoubleVector.
int block_number(const unsigned &i_dof) const
Return the block number corresponding to a global index i_dof.
GeneralisedElement *& element_pt(const unsigned long &e)
Return pointer to element e.
CRDoubleMatrix * matrix_pt() const
Access function to matrix_pt. If this is the master then cast the matrix pointer to MATRIX*...
MatrixVectorProduct * E_mat_vec_pt
MatrixVectorProduct operator for E (BFBt) if BFBt is to be formed.
void get_block(const unsigned &i, const unsigned &j, CRDoubleMatrix &output_matrix, const bool &ignore_replacement_block=false) const
Put block (i,j) into output_matrix. This block accounts for any coarsening of dof types and any repla...
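A minimal sketch; the block indices (1,0) for the divergence block B and the name b_pt are illustrative assumptions:

  CRDoubleMatrix* b_pt = new CRDoubleMatrix;
  this->get_block(1, 0, *b_pt); // B = block (1,0) of the Jacobian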
virtual void block_setup()
Determine the size of the matrix blocks and setup the lookup schemes relating the global degrees of f...
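A hedged sketch of the call made in setup(), assuming the overload of block_setup(...) that takes a dof-to-block map (cf. the dof_to_block_map line in the listing above):

  unsigned n_dof_types = this->ndof_types();
  Vector<unsigned> dof_to_block_map(n_dof_types, 0); // displacements -> block 0
  dof_to_block_map[n_dof_types - 1] = 1;             // pressure -> block 1
  this->block_setup(dof_to_block_map);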
unsigned long eqn_number(const unsigned &ieqn_local) const
Return the global equation number corresponding to the ieqn_local-th local equation number...
unsigned nrow_local() const
access function for the num of local rows on this processor. If no MPI then Nrow is returned...
unsigned first_row() const
access function for the first row on this processor
bool F_preconditioner_is_block_preconditioner
Boolean indicating whether the momentum system preconditioner is a block preconditioner.
void set_mesh(const unsigned &i, const Mesh *const mesh_pt, const bool &allow_multiple_element_type_in_mesh=false)
Set the i-th mesh for this block preconditioner. Note: The method set_nmesh(...) must be called befor...
double timer()
returns the time in seconds since some point in the past
void setup(DoubleMatrixBase *matrix_pt)
Setup the preconditioner: store the matrix pointer and the communicator pointer then call preconditio...
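A hedged usage sketch of how the preconditioner is typically attached to an iterative solver; the GMRES solver, the problem and mesh accessors and the constructor call are assumptions, not taken from this file:

  PressureBasedSolidLSCPreconditioner* prec_pt =
   new PressureBasedSolidLSCPreconditioner;
  prec_pt->set_solid_mesh(solid_mesh_pt);     // cf. the error message above
  GMRES<CRDoubleMatrix>* solver_pt = new GMRES<CRDoubleMatrix>;
  solver_pt->preconditioner_pt() = prec_pt;
  problem.linear_solver_pt() = solver_pt;     // setup(...) is then invoked
                                              // during the solve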
bool Preconditioner_has_been_setup
Control flag is true if the preconditioner has been setup (used so we can wipe the data when the prec...
An interface to allow SuperLU to be used as an (exact) Preconditioner.
void setup()
Broken assignment operator.
unsigned nrow() const
access function to the number of global rows.
void turn_into_subsidiary_block_preconditioner(BlockPreconditioner< CRDoubleMatrix > *master_block_prec_pt, const Vector< unsigned > &doftype_in_master_preconditioner_coarse)
Function to turn this preconditioner into a subsidiary preconditioner that operates within a bigger "...
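A hedged sketch of how the F (momentum-block) preconditioner might be made subsidiary to this preconditioner, as suggested by the F_block_preconditioner_pt-> fragment in the listing; the dof-type numbering and the final setup(...) call are assumptions:

  Vector<unsigned> f_dof_types;
  unsigned n_dof_types = this->ndof_types();
  for (unsigned i = 0; i < n_dof_types - 1; i++) // all displacement dof types
   {
    f_dof_types.push_back(i);
   }
  F_block_preconditioner_pt->
   turn_into_subsidiary_block_preconditioner(this, f_dof_types);
  F_block_preconditioner_pt->setup(this->matrix_pt());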
Matrix vector product helper class - primarily a wrapper to Trilinos's Epetra matrix vector product m...
virtual const OomphCommunicator * comm_pt() const
Get function for comm pointer.
unsigned nrow_local() const
access function for the num of local rows on this processor.
A vector in the mathematical sense, initially developed for linear algebra type applications. If MPI then this vector can be distributed - its distribution is described by the LinearAlgebraDistribution object at Distribution_pt. Data is stored in a C-style pointer vector (double*)
bool Using_default_p_preconditioner
flag indicating whether the default P preconditioner is used
void multiply(const DoubleVector &x, DoubleVector &y) const
Apply the operator to the vector x and return the result in the vector y.
Mesh * Solid_mesh_pt
the pointer to the mesh of block preconditionable solid elements.
void return_block_vector(const unsigned &n, const DoubleVector &b, DoubleVector &v) const
Takes the n-th block ordered vector, b, and copies its entries to the appropriate entries in the natu...
unsigned ndof_types_in_mesh(const unsigned &i) const
Return the number of DOF types in mesh i. WARNING: This should only be used by the upper-most master ...
A class for compressed row matrices. This is a distributable object.
LinearAlgebraDistribution * distribution_pt() const
access to the LinearAlgebraDistribution
Preconditioner * F_preconditioner_pt
Pointer to the 'preconditioner' for the F matrix.
bool Doc_time
Set Doc_time to true for outputting results of timings.
void build_without_copy(const unsigned &ncol, const unsigned &nnz, double *value, int *column_index, int *row_start)
keeps the existing distribution and just stores the matrix without copying the matrix data ...