PaStiX Handbook  6.2.1
cpucblk_dmpi_coeftab.c
1 /**
2  *
3  * @file cpucblk_dmpi_coeftab.c
4  *
5  * Precision dependent routines to send and receive cblks coeftab.
6  *
7  * @copyright 2015-2021 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
8  * Univ. Bordeaux. All rights reserved.
9  *
10  * @version 6.2.1
11  * @author Pierre Ramet
12  * @author Mathieu Faverge
13  * @author Tony Delarue
14  * @author Nolan Bredel
15  * @date 2021-07-02
16  *
17  * @generated from /builds/solverstack/pastix/kernels/cpucblk_zmpi_coeftab.c, normal z -> d, Tue Apr 12 09:38:41 2022
18  *
19  **/
20 #include "common/common.h"
21 #include "blend/solver.h"
22 #include "blend/solver_comm_matrix.h"
23 #include "kernels.h"
24 #include "pastix_dcores.h"
25 #include "pastix_dlrcores.h"
26 #include "cpucblk_dpack.h"
27 #include <lapacke.h>
28 
29 #if defined( PASTIX_WITH_MPI )
30 /**
31  *******************************************************************************
32  *
33  * @brief Asynchronously send a cblk to cblk->ownerid
34  *
35  *******************************************************************************
36  *
37  * @param[in] side
38  * Define which side of the cblk must be tested.
39  * @arg PastixLCoef if lower part only
40  * @arg PastixUCoef if upper part only
41  * @arg PastixLUCoef if both sides.
42  *
43  * @param[inout] solvmtx
44  * The solver matrix structure.
45  *
46  * @param[in] cblk
47  * The column block that will be sent.
48  *
49  *******************************************************************************/
50 void
51 cpucblk_disend( pastix_coefside_t side,
52  SolverMatrix *solvmtx,
53  SolverCblk *cblk )
54 {
55  MPI_Request request;
56  size_t bufsize;
57  void *buffer;
58  int rc;
59 
60  assert( cblk->cblktype & CBLK_FANIN );
61 
62  bufsize = cpucblk_dcompute_size( side, cblk );
63  buffer = cpucblk_dpack( side, cblk, bufsize );
64 
65 #if defined(PASTIX_DEBUG_MPI)
66  fprintf( stderr, "[%2d] Post Isend for cblk %ld toward %2d ( %ld Bytes )\n",
67  solvmtx->clustnum, (long)cblk->gcblknum, cblk->ownerid, (long)bufsize );
68 #endif
69 
70  rc = MPI_Isend( buffer, bufsize, MPI_CHAR,
71  cblk->ownerid, cblk->gcblknum, solvmtx->solv_comm, &request );
72  assert( rc == MPI_SUCCESS );
73 
74  solverCommMatrixAdd( solvmtx, cblk->ownerid, bufsize );
75 
76  /* Register the request to make it progress */
77  pastix_atomic_lock( &(solvmtx->reqlock) );
78 
79  assert( solvmtx->reqidx[ solvmtx->reqnum ] == -1 );
80  assert( solvmtx->reqnum >= 0 );
81  assert( solvmtx->reqnum < solvmtx->reqnbr );
82 
83  solvmtx->reqtab[ solvmtx->reqnum ] = request;
84  solvmtx->reqidx[ solvmtx->reqnum ] = cblk - solvmtx->cblktab;
85  solvmtx->reqnum++;
86 
87  pastix_atomic_unlock( &(solvmtx->reqlock) );
88 
89  (void)rc;
90 }
91 
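The send side above boils down to: pack, MPI_Isend(), then record the request in a shared table so that any thread can progress it later. The following standalone sketch reproduces that pattern outside of PaStiX; reqtab, reqnum and reqlock merely mimic the SolverMatrix fields used above, and the self-send in main() only keeps the program runnable on a single process.

#include <mpi.h>
#include <pthread.h>
#include <assert.h>

#define MAXREQ 32

static MPI_Request     reqtab[MAXREQ];            /* pending requests      */
static int             reqnum  = 0;               /* number of active ones */
static pthread_mutex_t reqlock = PTHREAD_MUTEX_INITIALIZER;

/* Post an asynchronous send and register the request for later progression. */
static void
isend_and_register( const void *buf, int size, int dest, int tag )
{
    MPI_Request request;

    MPI_Isend( buf, size, MPI_CHAR, dest, tag, MPI_COMM_WORLD, &request );

    pthread_mutex_lock( &reqlock );
    assert( reqnum < MAXREQ );
    reqtab[reqnum] = request;
    reqnum++;
    pthread_mutex_unlock( &reqlock );
}

int main( int argc, char **argv )
{
    char payload[8] = "coeftab";
    char rbuf[8];
    int  rank;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    /* Self-send so that the example runs with any number of processes. */
    isend_and_register( payload, 8, rank, 0 );
    MPI_Recv( rbuf, 8, MPI_CHAR, rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE );

    /* A real code would progress reqtab with MPI_Testsome() instead. */
    MPI_Waitall( reqnum, reqtab, MPI_STATUSES_IGNORE );
    reqnum = 0;

    MPI_Finalize();
    return 0;
}
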
92 /**
93  *******************************************************************************
94  *
95  * @brief Handle a finished request on a fanin
96  *
97  *******************************************************************************
98  *
99  * @param[in] side
100  * Define which side of the cblk is concerned.
101  * @arg PastixLCoef if lower part only
102  * @arg PastixUCoef if upper part only
103  * @arg PastixLUCoef if both sides.
104  *
105  * @param[in] solvmtx
106  * The solver matrix structure.
107  *
108  * @param[inout] cblk
109  * The cblk concerned by the computation.
110  *
111  *******************************************************************************/
112 static inline void
113 cpucblk_drequest_handle_fanin( pastix_coefside_t side,
114  const SolverMatrix *solvmtx,
115  SolverCblk *cblk )
116 {
117  assert( cblk->cblktype & CBLK_FANIN );
118 
119 #if defined(PASTIX_DEBUG_MPI)
120  {
121  size_t cblksize = ( side == PastixLUCoef ) ? 2 : 1;
122  cblksize = cblksize * cblk_colnbr( cblk ) * cblk->stride * sizeof(double);
123 
124  fprintf( stderr, "[%2d] Isend for cblk %ld toward %2d ( %ld Bytes ) (DONE)\n",
125  solvmtx->clustnum, (long)cblk->gcblknum, cblk->ownerid, (long)cblksize );
126  }
127 #endif
128  cpucblk_dfree( side, cblk );
129 
130  (void)solvmtx;
131 }
132 
133 /**
134  *******************************************************************************
135  *
136  * @brief Handle a finished request on a recv cblk.
137  *
138  *******************************************************************************
139  *
140  * @param[in] side
141  * Define which side of the cblk is concerned.
142  * @arg PastixLCoef if lower part only
143  * @arg PastixUCoef if upper part only
144  * @arg PastixLUCoef if both sides.
145  * @param[inout] solvmtx
146  * The solver matrix structure.
147  * @param[in] threadid
148  * Id of the thread calling this method.
149  * @param[in] status
150  * Status of the completed request, used to find the matching recv cblk.
151  * @param[inout] recvbuf The reception buffer of the completed request.
152  *******************************************************************************/
153 static inline void
154 cpucblk_drequest_handle_recv( pastix_coefside_t side,
155  SolverMatrix *solvmtx,
156  int threadid,
157  const MPI_Status *status,
158  char *recvbuf )
159 {
160  SolverCblk *cblk, *fcbk;
161  int src = status->MPI_SOURCE;
162  int tag = status->MPI_TAG;
163 
164  assert( ( 0 <= src ) && ( src < solvmtx->clustnbr ) );
165  assert( ( 0 <= tag ) && ( tag < solvmtx->gcblknbr ) );
166 
167  /*
168  * Let's look for the local cblk
169  */
170  fcbk = solvmtx->cblktab + solvmtx->gcbl2loc[ tag ];
171  cblk = fcbk - 1;
172 
173  /* Get through source */
174  while( cblk->ownerid != src ) {
175  cblk--;
176  assert( cblk >= solvmtx->cblktab );
177  assert( cblk->gcblknum == tag );
178  assert( cblk->cblktype & CBLK_RECV );
179  }
180 
181 #if defined(PASTIX_DEBUG_MPI)
182  {
183  pastix_int_t size = (cblk_colnbr(cblk) * cblk->stride) * sizeof(double);
184  int count = 0;
185 
186  if ( side != PastixLCoef ) {
187  size *= 2;
188  }
189 
190  MPI_Get_count( status, MPI_CHAR, &count );
191  assert( (cblk->cblktype & CBLK_COMPRESSED) ||
192  (!(cblk->cblktype & CBLK_COMPRESSED) && (count == size)) );
193 
194  /* We can't know the sender easily, so we don't print it */
195  fprintf( stderr, "[%2d] Irecv of size %d/%ld for cblk %ld (DONE)\n",
196  solvmtx->clustnum, count, (long)size, (long)cblk->gcblknum );
197  }
198 #endif
199 
200  /* Initialize the cblk with the reception buffer */
201  cblk->threadid = (fcbk->threadid == -1) ? threadid : fcbk->threadid;
202  cpucblk_dunpack( side, cblk, recvbuf );
203 
204  fcbk = solvmtx->cblktab + cblk->fblokptr->fcblknm;
205  cpucblk_dadd( PastixLCoef, 1., cblk, fcbk, &solvmtx->lowrank );
206 
207  /* If side is LU, let's add the U part too */
208  if ( side != PastixLCoef ) {
209  cpucblk_dadd( PastixUCoef, 1., cblk, fcbk, &solvmtx->lowrank );
210  }
211 
212  /* Reception cblks contribute to themselves */
213  cpucblk_drelease_deps( side, solvmtx, cblk, fcbk );
214 
215  /* Free the CBLK_RECV */
216  cpucblk_dfree( side, cblk );
217 }
218 
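Reception relies on MPI persistent requests: a preposted request is restarted with MPI_Start() after each completion and released with MPI_Request_free() once recvcnt reaches zero (the restart itself sits in cpucblk_drequest_handle() below). A minimal standalone rendition of that restart loop, assuming two processes and an arbitrary buffer size and message count:

#include <mpi.h>
#include <string.h>
#include <stdio.h>

#define BUFSIZE 256
#define NMSG    4

int main( int argc, char **argv )
{
    char rcoeftab[BUFSIZE]; /* shared reception buffer, as in the solver */
    char local[BUFSIZE];    /* private copy taken before restarting      */
    int  rank, recvcnt = NMSG;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    if ( rank == 0 ) {
        MPI_Request req;
        MPI_Status  status;

        /* Prepost a single persistent reception. */
        MPI_Recv_init( rcoeftab, BUFSIZE, MPI_CHAR, MPI_ANY_SOURCE,
                       MPI_ANY_TAG, MPI_COMM_WORLD, &req );
        MPI_Start( &req );

        while ( recvcnt > 0 ) {
            MPI_Wait( &req, &status );

            /* Copy the data out of the shared buffer before reposting. */
            memcpy( local, rcoeftab, BUFSIZE );
            recvcnt--;

            if ( recvcnt > 0 ) {
                MPI_Start( &req );        /* restart the communication */
            }
            else {
                MPI_Request_free( &req ); /* last one: release it      */
            }
            printf( "got tag %d from %d\n", status.MPI_TAG, status.MPI_SOURCE );
        }
    }
    else if ( rank == 1 ) {
        char sbuf[BUFSIZE] = { 0 };
        int  i;
        for ( i = 0; i < NMSG; i++ ) {
            MPI_Send( sbuf, BUFSIZE, MPI_CHAR, 0, i, MPI_COMM_WORLD );
        }
    }

    MPI_Finalize();
    return 0;
}
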
219 /**
220  *******************************************************************************
221  *
222  * @brief Handle a finished request.
223  *
224  * If cblktype & CBLK_FANIN : Will deallocate the coeftab
225  * If cblktype & CBLK_RECV : Will add cblk and deallocate the coeftab
226  *
227  *******************************************************************************
228  *
229  * @param[in] side
230  * Define which side of the cblk is concerned.
231  * @arg PastixLCoef if lower part only
232  * @arg PastixUCoef if upper part only
233  * @arg PastixLUCoef if both sides.
234  *
235  * @param[inout] solvmtx
236  * The solver matrix structure.
237  *
238  * @param[in] threadid
239  * Id of the thread calling this method.
240  *
241  * @param[in] outcount
242  * Number of finished requests.
243  *
244  * @param[in] indexes
245  * Array of completed requests
246  *
247  * @param[in] statuses
248  * Array of statuses for the completed requests.
249  * @return The number of completed requests that were freed and must be removed from the request table.
250  *******************************************************************************/
251 static inline int
252 cpucblk_drequest_handle( pastix_coefside_t side,
253  SolverMatrix *solvmtx,
254  int threadid,
255  int outcount,
256  const int *indexes,
257  const MPI_Status *statuses )
258 {
259  pastix_int_t i, reqid;
260  int nbrequest = outcount;
261 
262  for( i = 0; i < outcount; i++ ){
263  reqid = indexes[i];
264 
265  /*
266  * Handle the reception
267  */
268  if ( solvmtx->reqidx[reqid] == -1 ) {
269  /* We're on a cblk recv, copy the data and restart the communication */
270  char *recvbuf;
271  MPI_Status status;
272  int size;
273 
274  memcpy( &status, statuses + i, sizeof(MPI_Status) );
275  MPI_Get_count( &status, MPI_CHAR, &size );
276 
277  MALLOC_INTERN( recvbuf, size, char );
278  memcpy( recvbuf, solvmtx->rcoeftab, size );
279 
280  solvmtx->recvcnt--;
281 
282  /* Let's restart the communication */
283  if ( solvmtx->recvcnt > 0 ) {
284  MPI_Start( solvmtx->reqtab + reqid );
285  nbrequest--;
286  }
287  else {
288  MPI_Request_free( solvmtx->reqtab + reqid );
289  solvmtx->reqtab[reqid] = MPI_REQUEST_NULL;
290  }
291 
292  cpucblk_drequest_handle_recv( side, solvmtx, threadid,
293  &status, recvbuf );
294  }
295  /*
296  * Handle the emission
297  */
298  else {
299  SolverCblk *cblk = solvmtx->cblktab + solvmtx->reqidx[ reqid ];
300  assert( cblk->cblktype & CBLK_FANIN );
301 
302  cpucblk_drequest_handle_fanin( side, solvmtx, cblk );
303 
304 #if !defined(NDEBUG)
305  solvmtx->reqidx[ reqid ] = -1;
306 #endif
307  solvmtx->fanincnt--;
308  }
309  }
310 
311  return nbrequest;
312 }
313 
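The outcount/indexes/statuses triplet consumed by cpucblk_drequest_handle() is produced by MPI_Testsome(), which reports every completed request of a table in one call. A self-contained polling loop in the same style, with a trivial print standing in for the real handler and self-sends keeping the example single-process:

#include <mpi.h>
#include <stdio.h>

#define NREQ 4

int main( int argc, char **argv )
{
    MPI_Request reqtab[NREQ];
    MPI_Status  statuses[NREQ];
    int         indexes[NREQ];
    char        sbuf[NREQ][4] = { "ab", "cd", "ef", "gh" };
    char        rbuf[NREQ][4];
    int         rank, i, pending, outcount;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    /* Post a few self-sends so that something completes. */
    for ( i = 0; i < NREQ; i++ ) {
        MPI_Isend( sbuf[i], 4, MPI_CHAR, rank, i, MPI_COMM_WORLD, reqtab + i );
    }
    for ( i = 0; i < NREQ; i++ ) {
        MPI_Recv( rbuf[i], 4, MPI_CHAR, rank, i, MPI_COMM_WORLD,
                  MPI_STATUS_IGNORE );
    }

    /* Poll until every request has completed; MPI sets completed entries
     * of reqtab to MPI_REQUEST_NULL, so they are skipped next time. */
    pending = NREQ;
    while ( pending > 0 ) {
        MPI_Testsome( NREQ, reqtab, &outcount, indexes, statuses );
        for ( i = 0; i < outcount; i++ ) {
            printf( "request %d completed\n", indexes[i] ); /* the handler */
        }
        pending -= outcount;
    }

    MPI_Finalize();
    return 0;
}
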
314 /**
315  *******************************************************************************
316  *
317  * @brief Update the request array and the request indexes in a contiguous way.
318  *
319  *******************************************************************************
320  *
321  * @param[inout] solvmtx
322  * The solver matrix structure with the updated arrays.
323  *
324  *******************************************************************************/
325 static inline void
326 cpucblk_dupdate_reqtab( SolverMatrix *solvmtx )
327 {
328  /* Pointer to the compressed array of request */
329  MPI_Request *outrequest = solvmtx->reqtab;
330  pastix_int_t *outreqloc = solvmtx->reqidx;
331  int outreqnbr = 0;
332 
333  /* Pointer to the input array of request */
334  MPI_Request *inrequest = solvmtx->reqtab;
335  pastix_int_t *inreqloc = solvmtx->reqidx;
336  int inreqnbr = 0;
337 
338  /* Look for the first completed request */
339  while( (outreqnbr < solvmtx->reqnum) &&
340  (*outrequest != MPI_REQUEST_NULL) )
341  {
342  outrequest++;
343  outreqnbr++;
344  outreqloc++;
345  }
346 
347  inrequest = outrequest;
348  inreqloc = outreqloc;
349  inreqnbr = outreqnbr;
350  for( ; inreqnbr < solvmtx->reqnum;
351  inreqnbr++, inrequest++, inreqloc++ )
352  {
353  if ( *inrequest == MPI_REQUEST_NULL )
354  {
355  continue;
356  }
357 
358  /* Pack the uncompleted request */
359  *outrequest = *inrequest;
360  *outreqloc = *inreqloc;
361 
362  /* Move to the next one */
363  outrequest++;
364  outreqloc++;
365  outreqnbr++;
366  }
367 
368 #if !defined(NDEBUG)
369  /* Set the remaining entries of the array to -1 */
370  memset( outreqloc, 0xff, (solvmtx->reqnbr - outreqnbr) * sizeof(pastix_int_t) );
371 #endif
372 
373 #if defined(PASTIX_DEBUG_MPI)
374  int i;
375  for( i = outreqnbr; i < solvmtx->reqnbr; i++ )
376  {
377  solvmtx->reqtab[i] = MPI_REQUEST_NULL;
378  }
379 #endif
380  assert( outreqnbr < solvmtx->reqnum );
381  solvmtx->reqnum = outreqnbr;
382 }
383 
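Stripped of MPI, cpucblk_dupdate_reqtab() is an in-place two-pointer pack that preserves the relative order of the surviving entries. A standalone sketch of the same walk on plain int arrays, with -1 playing the role of MPI_REQUEST_NULL (the values are made up for illustration):

#include <stdio.h>

/* Pack the non-completed entries (!= -1) of req/idx to the front,
 * preserving order, and return the new count. */
static int
pack_requests( int *req, int *idx, int num )
{
    int in, out = 0;

    /* Skip the leading block that is already packed. */
    while ( (out < num) && (req[out] != -1) ) {
        out++;
    }

    for ( in = out; in < num; in++ ) {
        if ( req[in] == -1 ) {
            continue;            /* completed entry: drop it          */
        }
        req[out] = req[in];      /* keep the still-active request     */
        idx[out] = idx[in];      /* ... and its associated cblk index */
        out++;
    }
    return out;
}

int main( void )
{
    int req[] = { 10, -1, 12, -1, -1, 15 };
    int idx[] = {  0,  1,  2,  3,  4,  5 };
    int i, num = pack_requests( req, idx, 6 );

    for ( i = 0; i < num; i++ ) {
        printf( "req=%d idx=%d\n", req[i], idx[i] ); /* 10/0, 12/2, 15/5 */
    }
    return 0;
}

Keeping the order stable also keeps each request slot aligned with its companion index slot, which is why reqtab and reqidx are packed in the same pass above.
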
384 /**
385  *******************************************************************************
386  *
387  * @brief Progress communications for one process
388  *
389  * If a communication is completed, it will be handled.
390  * If cblktype & CBLK_FANIN : Will deallocate coeftab
391  * If cblktype & CBLK_RECV : Will add cblk to fcblk
392  *
393  *******************************************************************************
394  *
395  * @param[in] side
396  * Define which side of the cblk must be tested.
397  * @arg PastixLCoef if lower part only
398  * @arg PastixUCoef if upper part only
399  * @arg PastixLUCoef if both sides.
400  *
401  * @param[inout] solvmtx
402  * The solver matrix structure.
403  *
404  * @param[in] threadid
405  * Id of the thread calling this method.
406  *
407  *******************************************************************************/
408 void
409 cpucblk_dmpi_progress( pastix_coefside_t side,
410  SolverMatrix *solvmtx,
411  int threadid )
412 {
413  pthread_t tid = pthread_self();
414  int outcount = 1;
415  int nbrequest, nbfree;
416  int indexes[ solvmtx->reqnbr ];
417  MPI_Status statuses[ solvmtx->reqnbr ];
418 
419  /* Check if someone is already communicating or not */
420  pthread_mutex_lock( &pastix_comm_lock );
421  if ( pastix_comm_tid == (pthread_t)-1 ) {
422  pastix_comm_tid = tid;
423  }
424  pthread_mutex_unlock( &pastix_comm_lock );
425 
426  if ( tid != pastix_comm_tid ) {
427  return;
428  }
429 
430  /*
431  * Let's register the number of active requests.
432  * We now suppose that the current thread is working on the first nbrequest
433  * active in the reqtab array. Additional requests can be posted during this
434  * progression, but it will be with a larger index. Thus, we do not need to
435  * protect every change to these requests.
436  * When this is done, the request array is locked to be packed, and the
437  * number of requests is updated for the next round.
438  */
439  pastix_atomic_lock( &(solvmtx->reqlock) );
440  nbrequest = solvmtx->reqnum;
441  pastix_atomic_unlock( &(solvmtx->reqlock) );
442 
443  while( (outcount > 0) && (nbrequest > 0) )
444  {
445  MPI_Testsome( nbrequest, solvmtx->reqtab, &outcount, indexes, statuses );
446  nbfree = 0;
447 
448  /* Handle all the completed requests */
449  if ( outcount > 0 ) {
450  nbfree = cpucblk_drequest_handle( side, solvmtx, threadid,
451  outcount, indexes, statuses );
452  }
453 
454  /*
455  * Pack the request arrays, and update the number of active requests by
456  * removing the completed ones
457  */
458  pastix_atomic_lock( &(solvmtx->reqlock) );
459  if ( nbfree > 0 ) {
460  cpucblk_dupdate_reqtab( solvmtx );
461  }
462  nbrequest = solvmtx->reqnum;
463  pastix_atomic_unlock( &(solvmtx->reqlock) );
464  }
465 
466  pastix_comm_tid = (pthread_t)-1;
467 }
468 #endif /* defined(PASTIX_WITH_MPI) */
469 
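cpucblk_dmpi_progress() lets a single thread at a time enter the progression loop; the others return immediately to their compute work. The election is a compare-under-mutex on a shared thread id, as in this pthreads sketch where comm_lock and comm_tid mirror pastix_comm_lock and pastix_comm_tid, and the progression body is left as a placeholder:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t comm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t       comm_tid  = (pthread_t)-1;

static void
progress( void )
{
    pthread_t tid = pthread_self();

    /* Claim the communicator role if nobody holds it. */
    pthread_mutex_lock( &comm_lock );
    if ( comm_tid == (pthread_t)-1 ) {
        comm_tid = tid;
    }
    pthread_mutex_unlock( &comm_lock );

    if ( !pthread_equal( tid, comm_tid ) ) {
        return; /* someone else is already progressing communications */
    }

    /* ... progress communications here ... */

    comm_tid = (pthread_t)-1; /* release the role */
}

static void *
worker( void *arg )
{
    (void)arg;
    progress();
    return NULL;
}

int main( void )
{
    pthread_t th[4];
    int i;
    for ( i = 0; i < 4; i++ ) pthread_create( th + i, NULL, worker, NULL );
    for ( i = 0; i < 4; i++ ) pthread_join( th[i], NULL );
    puts( "done" );
    return 0;
}
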
470 /**
471  *******************************************************************************
472  *
473  * @brief Wait for incoming dependencies, and return when cblk->ctrbcnt has reached 0.
474  *
475  *******************************************************************************
476  *
477  * @param[in] rank
478  * The rank (thread id) of the calling thread. It is forwarded to
479  * cpucblk_dmpi_progress(), which tests rather than waits on the
480  * pending communications, in order to avoid deadlocks in
481  * multi-threaded environments.
482  *
483  * @param[in] side
484  * Define which side of the cblk must be released.
485  * @arg PastixLCoef if lower part only
486  * @arg PastixUCoef if upper part only
487  * @arg PastixLUCoef if both sides.
488  *
489  * @param[inout] solvmtx
490  * The solver matrix structure.
491  *
492  * @param[inout] cblk
493  * The column block that contributes to fcblk.
494  *
495  * @return 1 if the cblk is a fanin, 0 otherwise
496  *
497  *******************************************************************************/
498 int
499 cpucblk_dincoming_deps( int rank,
500  pastix_coefside_t side,
501  SolverMatrix *solvmtx,
502  SolverCblk *cblk )
503 {
504 #if defined(PASTIX_WITH_MPI)
505  if ( cblk->cblktype & CBLK_FANIN ) {
506  /*
507  * We are in the sequential case: we progress communications and
508  * return if nothing has completed.
509  */
510  //cpucblk_dtestsome( side, solvmtx );
511  return 1;
512  }
513 
514  if ( cblk->cblktype & CBLK_RECV ) {
515  return 1;
516  }
517 
518  /* Make sure we receive every contribution */
519  while( cblk->ctrbcnt > 0 ) {
520  cpucblk_dmpi_progress( side, solvmtx, rank );
521  }
522 #else
523  assert( !(cblk->cblktype & (CBLK_FANIN | CBLK_RECV)) );
524  do { } while( cblk->ctrbcnt > 0 );
525 #endif
526 
527  (void)rank;
528  (void)side;
529  (void)solvmtx;
530 
531  return 0;
532 }
533 
534 /**
535  *******************************************************************************
536  *
537  * @brief Release the dependencies of the given cblk after an update.
538  *
539  *******************************************************************************
540  *
541  * @param[in] side
542  * Define which side of the cblk must be released.
543  * @arg PastixLCoef if lower part only
544  * @arg PastixUCoef if upper part only
545  * @arg PastixLUCoef if both sides.
546  *
547  * @param[inout] solvmtx
548  * The solver matrix structure.
549  *
550  * @param[in] cblk
551  * The column block that contributes to fcblk.
552  *
553  * @param[inout] fcbk
554  * The facing column block that is updated by cblk.
555  *
556  *******************************************************************************/
557 void
558 cpucblk_drelease_deps( pastix_coefside_t side,
559  SolverMatrix *solvmtx,
560  const SolverCblk *cblk,
561  SolverCblk *fcbk )
562 {
563  int32_t ctrbcnt;
564  ctrbcnt = pastix_atomic_dec_32b( &(fcbk->ctrbcnt) );
565  if ( !ctrbcnt ) {
566 #if defined(PASTIX_WITH_MPI)
567  if ( fcbk->cblktype & CBLK_FANIN ) {
568  cpucblk_disend( side, solvmtx, fcbk );
569  return;
570  }
571 #else
572  (void)side;
573 #endif
574  if ( solvmtx->computeQueue ) {
575  pastix_queue_t *queue = solvmtx->computeQueue[ cblk->threadid ];
576  pqueuePush1( queue, fcbk - solvmtx->cblktab, queue->size );
577  }
578  }
579 }
580 
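cpucblk_drelease_deps() implements the usual "last contributor schedules the target" idiom: each contributor atomically decrements ctrbcnt, and only the caller that brings it to zero pushes fcbk to a ready queue (or, for a fanin, sends it away). A C11 sketch of the core idiom; ready_push() is a hypothetical stand-in for pqueuePush1(), and note that atomic_fetch_sub() returns the pre-decrement value, whereas the code above tests the post-decrement count returned by pastix_atomic_dec_32b():

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
    atomic_int ctrbcnt; /* remaining contributions before fcbk is ready */
    int        id;
} cblk_t;

/* Hypothetical stand-in for pqueuePush1(): just report readiness. */
static void
ready_push( int id )
{
    printf( "cblk %d has received all its contributions\n", id );
}

/* Called once per incoming contribution; atomic_fetch_sub() returns the
 * old value, hence the `== 1` test instead of `== 0`. */
static void
release_dep( cblk_t *fcbk )
{
    if ( atomic_fetch_sub( &fcbk->ctrbcnt, 1 ) == 1 ) {
        ready_push( fcbk->id );
    }
}

int main( void )
{
    cblk_t fcbk = { 3, 42 };

    release_dep( &fcbk ); /* 3 -> 2 */
    release_dep( &fcbk ); /* 2 -> 1 */
    release_dep( &fcbk ); /* 1 -> 0: fcbk is scheduled exactly once */
    return 0;
}

The same transition-to-zero argument explains why the fanin send above is issued by exactly one thread: only one caller can observe the counter reaching zero.
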
581 /**
582  *******************************************************************************
583  *
584  * @brief Waitall routine for current cblk request
585  *
586  * Some cblks may not be deallocated by the static scheduler, so a
587  * cleanup may be necessary.
588  *
589  *******************************************************************************
590  *
591  * @param[in] side
592  * Define which side of the cblk must be tested.
593  * @arg PastixLCoef if lower part only
594  * @arg PastixUCoef if upper part only
595  * @arg PastixLUCoef if both sides.
596  *
597  * @param[in] sched
598  * Define which scheduler is used
599  * @arg PastixSchedSequential if sequential
600  * @arg PastixSchedStatic if multi-threaded static scheduler
601  * @arg PastixSchedDynamic if multi-threaded dynamic scheduler
602  * No other scheduler is supported.
603  *
604  * @param[inout] solvmtx
605  * The solver matrix structure.
606  *
607  *******************************************************************************/
608 void
609 cpucblk_drequest_cleanup( pastix_coefside_t side,
610  pastix_int_t sched,
611  SolverMatrix *solvmtx )
612 {
613  if ( (sched != PastixSchedSequential) &&
614  (sched != PastixSchedStatic) &&
615  (sched != PastixSchedDynamic) )
616  {
617  return;
618  }
619 #if defined(PASTIX_WITH_MPI)
620  pastix_int_t i;
621  int rc;
622  SolverCblk *cblk;
623  int reqnbr = solvmtx->reqnum;
624  MPI_Status status;
625 
626 #if defined(PASTIX_DEBUG_MPI)
627  fprintf( stderr, "[%2d] Wait for all pending communications\n",
628  solvmtx->clustnum );
629 #endif
630 
631  for( i=0; i<reqnbr; i++ )
632  {
633  if ( solvmtx->reqtab[i] == MPI_REQUEST_NULL ) {
634  assert( 0 /* MPI_REQUEST_NULL should have been pushed to the end */ );
635  solvmtx->reqnum--;
636  continue;
637  }
638 
639  /* Make sure that we don't have an already cleaned request in dynamic */
640  assert( solvmtx->reqidx[i] != -1 );
641 
642  rc = MPI_Wait( solvmtx->reqtab + i, &status );
643  assert( rc == MPI_SUCCESS );
644 
645  cblk = solvmtx->cblktab + solvmtx->reqidx[i];
646 
647  /* We should wait only for fanin */
648  assert( cblk->cblktype & CBLK_FANIN );
649 
650  cpucblk_drequest_handle_fanin( side, solvmtx, cblk );
651 
652  solvmtx->reqnum--;
653  }
654  assert( solvmtx->reqnum == 0 );
655  (void)rc;
656 #else
657  (void)side;
658  (void)solvmtx;
659 #endif
660 }
solver_cblk_s::ownerid
int ownerid
Definition: solver.h:146
solver_cblk_s::threadid
int threadid
Definition: solver.h:147
cblk_colnbr
static pastix_int_t cblk_colnbr(const SolverCblk *cblk)
Compute the number of columns in a column block.
Definition: solver.h:247
solver_cblk_s::fblokptr
SolverBlok * fblokptr
Definition: solver.h:134
pastix_queue_s
Queue structure.
Definition: queue.h:38
solver_cblk_s::stride
pastix_int_t stride
Definition: solver.h:135
cpucblk_dunpack
void cpucblk_dunpack(pastix_coefside_t side, SolverCblk *cblk, void *buffer)
Unpack data and fill the column block concerned by the computation.
Definition: cpucblk_dpack.c:431
solver_cblk_s
Solver column block structure.
Definition: solver.h:127
pastix_coefside_t
enum pastix_coefside_e pastix_coefside_t
Data blocks used in the kernel.
cpucblk_dfree
void cpucblk_dfree(pastix_coefside_t side, SolverCblk *cblk)
Free the cblk structure that stores the coefficients.
Definition: cpucblk_dinit.c:317
PastixSchedSequential
@ PastixSchedSequential
Definition: api.h:313
cpucblk_dcompute_size
size_t cpucblk_dcompute_size(pastix_coefside_t side, const SolverCblk *cblk)
Compute the size of the buffer to send.
Definition: cpucblk_dpack.c:131
PastixUCoef
@ PastixUCoef
Definition: api.h:457
PastixLCoef
@ PastixLCoef
Definition: api.h:456
solver_cblk_s::ctrbcnt
volatile int32_t ctrbcnt
Definition: solver.h:129
pqueuePush1
static void pqueuePush1(pastix_queue_t *q, pastix_int_t elt, double key1)
Push an element with a single key.
Definition: queue.h:64
solver_cblk_s::gcblknum
pastix_int_t gcblknum
Definition: solver.h:140
pastix_queue_s::size
pastix_int_t size
Definition: queue.h:39
cpucblk_dpack
void * cpucblk_dpack(pastix_coefside_t side, SolverCblk *cblk, size_t size)
Pack a column block (Full rank or low rank).
Definition: cpucblk_dpack.c:400
solver_cblk_s::cblktype
int8_t cblktype
Definition: solver.h:130
PastixLUCoef
@ PastixLUCoef
Definition: api.h:458
cpucblk_drelease_deps
void cpucblk_drelease_deps(pastix_coefside_t side, SolverMatrix *solvmtx, const SolverCblk *cblk, SolverCblk *fcbk)
Release the dependencies of the given cblk after an update.
Definition: cpucblk_dmpi_coeftab.c:558
cpucblk_drequest_cleanup
void cpucblk_drequest_cleanup(pastix_coefside_t side, pastix_int_t sched, SolverMatrix *solvmtx)
Waitall routine for current cblk request.
Definition: cpucblk_dmpi_coeftab.c:609
cpucblk_dincoming_deps
int cpucblk_dincoming_deps(int rank, pastix_coefside_t side, SolverMatrix *solvmtx, SolverCblk *cblk)
Wait for incoming dependencies, and return when cblk->ctrbcnt has reached 0.
Definition: cpucblk_dmpi_coeftab.c:499
cpucblk_dadd
void cpucblk_dadd(pastix_coefside_t side, double alpha, const SolverCblk *cblkA, SolverCblk *cblkB, const pastix_lr_t *lowrank)
Add two column blocks in full rank format.
Definition: cpucblk_dadd.c:358
PastixSchedStatic
@ PastixSchedStatic
Definition: api.h:314
PastixSchedDynamic
@ PastixSchedDynamic
Definition: api.h:317
solver_blok_s::fcblknm
pastix_int_t fcblknm
Definition: solver.h:110