#include "order/order_internal.h"

#if defined( PASTIX_WITH_MPI )
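
/*
 * The routines excerpted below exchange right-hand side entries between MPI
 * processes when the pivots are permuted: a broadcast-based scheme when b is
 * replicated on every process, and a non-blocking point-to-point scheme when
 * b is distributed over the processes.
 *
 * The first fragment appears to be the receive handler for the backward
 * permutation with a replicated b: each received global index selects a row
 * block of the user vector, and the matching dofi entries of each of the nrhs
 * columns are copied back into b. The incoming values are assumed to be
 * packed column-wise with leading dimension ldval, hence the assertion below.
 */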
const spmatrix_t *spm = pastix_data->csc;

/* ... */

assert( nrhs * ldval == size_val );

for ( idx = 0; idx < size_idx; idx++, indexes++, cnt += dofi ) {
    ig   = *indexes;
    ige  = ( dof > 0 ) ? ig * dof : dofs[ig];
    dofi = ( dof > 0 ) ? dof : dofs[ig+1] - dofs[ig];

    for ( j = 0; j < nrhs; j++ ) {
        memcpy( b + ige + j * ldb, values + cnt + j * ldval, dofi * sizeof(double) );
    }
}
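
/*
 * Exchange for a replicated b: each process takes its turn as the root of a
 * pair of broadcasts, presumably sending its index buffer first and then the
 * values held in Pb->b. The other processes receive into the scratch buffers
 * idx_buf / val_buf, sized with rhs_comm->max_idx / max_val, and scatter them
 * back into their own copy of b through bvec_dhandle_recv_backward_rep().
 */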
double *val_buf = NULL;

/* ... */

MALLOC_INTERN( val_buf, rhs_comm->max_val, double );

/* ... */

for ( c = 0; c < clustnbr; c++ ) {
    /* ... */
    sends = &( data_comm->nsends );
    recvs = &( data_comm->nrecvs );

    if ( c == clustnum ) {
        if ( sends->idxcnt > 0 ) {
            /* ... */
            MPI_Bcast( Pb->b, sends->valcnt, PASTIX_MPI_DOUBLE, c, rhs_comm->comm );
        }
        continue;
    }

    if ( recvs->idxcnt > 0 ) {
        MPI_Bcast( idx_buf, recvs->idxcnt, PASTIX_MPI_INT, c, rhs_comm->comm );
        MPI_Bcast( val_buf, recvs->valcnt, PASTIX_MPI_DOUBLE, c, rhs_comm->comm );

        bvec_dhandle_recv_backward_rep( pastix_data, nrhs, b, ldb, idx_buf, val_buf,
                                        recvs->idxcnt, recvs->valcnt );
    }
}

memFree_null( idx_buf );
memFree_null( val_buf );

for ( c = 0; c < clustnbr; c++ ) {
    /* ... */
    sends = &( data_comm->nsends );

    if ( c == clustnum ) {
        /* ... */
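
/*
 * Receive handler for the forward permutation when b is distributed: each
 * received global index ig is translated to its local position in the
 * permuted vector through bvec_glob2Ploc(), and the dofi values of every one
 * of the nrhs columns are copied into pb, whose leading dimension is ldpb.
 */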
const double *values,
/* ... */

const spmatrix_t *spm = pastix_data->csc;

for ( idx = 0; idx < size_idx; idx++, indexes++ ) {
    ig   = *indexes;
    ilpe = bvec_glob2Ploc( pastix_data, ig );

    dofi = ( dof > 0 ) ? dof : dofs[ig+1] - dofs[ig];

    for ( j = 0; j < nrhs; j++, values += dofi ) {
        memcpy( pb + ilpe + j * ldpb, values, dofi * sizeof(double) );
    }
}
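
/*
 * Receive handler for the backward permutation when b is distributed: here
 * the received indexes appear to be expressed in the permuted ordering, and
 * bvec_Pglob2loc() maps each permuted global index igp back to a local
 * position in the user vector b through the glob2loc array.
 */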
const double *values,
/* ... */

const spmatrix_t *spm = pastix_data->csc;

for ( idx = 0; idx < size_idx; idx++, indexes++ ) {
    igp = *indexes;
    ile = bvec_Pglob2loc( pastix_data, glob2loc, igp );

    /* ... */

    for ( j = 0; j < nrhs; j++, values += dofi ) {
        memcpy( b + ile + j * ldb, values, dofi * sizeof(double) );
    }
}
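
/*
 * Dispatcher used for every message received in the distributed case: the
 * permutation direction selects the handler, writing either into the
 * permuted vector Pb->b (forward) or back into the user vector b (backward).
 */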
const double *values,
/* ... */

if ( dir == PastixDirForward ) {
    bvec_dhandle_recv_forward_dst( pastix_data, nrhs, Pb->b, Pb->ld, indexes, values, size_idx );
}
else {
    bvec_dhandle_recv_backward_dst( pastix_data, nrhs, b, ldb, glob2loc, indexes, values, size_idx );
}
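
/*
 * Exchange for a distributed b: each process posts non-blocking sends of its
 * per-cluster index and value buffers, starting with its next neighbour
 * (clustnum+1), then receives in the opposite order, starting with its
 * previous neighbour (clustnum-1), so that matching send/receive pairs line
 * up early and the non-blocking sends keep the exchange free of deadlock.
 * Each received message is unpacked on the fly by bvec_dhandle_recv_dst().
 */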
double *val_buf = NULL;

/* ... */

MPI_Status  statuses[(clustnbr-1)*2];
MPI_Request requests[(clustnbr-1)*2];

/* ... */

MALLOC_INTERN( val_buf, rhs_comm->max_val, double );

/* ... */

/* Posts the non-blocking sends, starting with the next cluster. */
c_send = (clustnum+1) % clustnbr;
for ( k = 0; k < clustnbr-1; k++ ) {
    data_send = data_comm + c_send;
    sends     = &( data_send->nsends );

    if ( c_send == clustnum ) {
        continue;
    }

    if ( sends->idxcnt > 0 ) {
        MPI_Isend( data_send->send_idxbuf, sends->idxcnt, PASTIX_MPI_INT, c_send,
                   PastixTagIndexes, rhs_comm->comm, &requests[counter_req++] );
        MPI_Isend( data_send->send_valbuf, sends->valcnt, PASTIX_MPI_DOUBLE, c_send,
                   PastixTagValues, rhs_comm->comm, &requests[counter_req++] );
    }
    c_send = (c_send+1) % clustnbr;
}

/* Receives the indexes and values, starting with the previous cluster. */
c_recv = (clustnum-1+clustnbr) % clustnbr;
for ( k = 0; k < clustnbr-1; k++ ) {
    data_recv = data_comm + c_recv;
    recvs     = &( data_recv->nrecvs );

    if ( ( rhs_comm->max_idx > 0 ) && ( recvs->idxcnt > 0 ) ) {
        MPI_Recv( idx_buf, recvs->idxcnt, PASTIX_MPI_INT, c_recv, PastixTagIndexes,
                  rhs_comm->comm, MPI_STATUS_IGNORE );
        MPI_Recv( val_buf, recvs->valcnt, PASTIX_MPI_DOUBLE, c_recv, PastixTagValues,
                  rhs_comm->comm, MPI_STATUS_IGNORE );

        assert( recvs->idxcnt <= recvs->valcnt );
        bvec_dhandle_recv_dst( pastix_data, dir, nrhs, b, ldb, Pb, glob2loc,
                               idx_buf, val_buf, recvs->idxcnt );
    }
    c_recv = (c_recv-1+clustnbr) % clustnbr;
}

MPI_Waitall( counter_req, requests, statuses );

/* ... */
memFree_null( idx_buf );
memFree_null( val_buf );
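
/*
 * MPI_Waitall() completes the pending non-blocking sends, so the per-cluster
 * send buffers (data_send->send_idxbuf / send_valbuf) are no longer in use by
 * MPI once the routine returns; the scratch buffers idx_buf and val_buf freed
 * above were only used with blocking receives, so their release does not
 * depend on the Waitall.
 */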