PaStiX Handbook 6.3.0
cpucblk_zmpi_coeftab.c
/**
 *
 * @file cpucblk_zmpi_coeftab.c
 *
 * Precision-dependent routines to send and receive cblk coeftabs.
 *
 * @copyright 2015-2023 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
 *                      Univ. Bordeaux. All rights reserved.
 *
 * @version 6.3.0
 * @author Pierre Ramet
 * @author Mathieu Faverge
 * @author Tony Delarue
 * @author Nolan Bredel
 * @date 2022-10-18
 *
 * @generated from /builds/solverstack/pastix/kernels/cpucblk_zmpi_coeftab.c, normal z -> z, Thu Jun 8 12:22:43 2023
 *
 **/
#include "common/common.h"
#include "blend/solver.h"
#include "kernels.h"
#include "pastix_zcores.h"
#include "pastix_zlrcores.h"
#include "cpucblk_zpack.h"
#include <lapacke.h>

#if defined( PASTIX_WITH_MPI )
/**
 *******************************************************************************
 *
 * @brief Asynchronously send a cblk to cblk->ownerid
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk must be sent.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 * @param[in] cblk
 *          The column block that will be sent.
 *
 *******************************************************************************/
void
cpucblk_zisend( pastix_coefside_t side,
                SolverMatrix     *solvmtx,
                SolverCblk       *cblk )
{
    MPI_Request request;
    size_t      bufsize;
    void       *buffer;
    int         rc;

    assert( cblk->cblktype & CBLK_FANIN );

    bufsize = cpucblk_zcompute_size( side, cblk );
    buffer  = cpucblk_zpack( side, cblk, bufsize );

    /* Backup the buffer pointer when the cblk is packed */
    if ( cblk->cblktype & CBLK_COMPRESSED ) {
        cpucblk_zfree( side, cblk );
        if ( side != PastixUCoef ) {
            cblk->lcoeftab = buffer;
        }
        else {
            cblk->ucoeftab = buffer;
        }
    }

#if defined(PASTIX_DEBUG_MPI)
    fprintf( stderr, "[%2d] Post Isend for cblk %ld toward %2d ( %ld Bytes )\n",
             solvmtx->clustnum, (long)cblk->gcblknum, cblk->ownerid, (long)bufsize );
#endif

    rc = MPI_Isend( buffer, bufsize, MPI_CHAR,
                    cblk->ownerid, cblk->gcblknum, solvmtx->solv_comm, &request );
    assert( rc == MPI_SUCCESS );

    solverCommMatrixAdd( solvmtx, cblk->ownerid, bufsize );

    /* Register the request to make it progress */
    pastix_atomic_lock( &(solvmtx->reqlock) );

    assert( solvmtx->reqidx[ solvmtx->reqnum ] == -1 );
    assert( solvmtx->reqnum >= 0 );
    assert( solvmtx->reqnum < solvmtx->reqnbr );

    solvmtx->reqtab[ solvmtx->reqnum ] = request;
    solvmtx->reqidx[ solvmtx->reqnum ] = cblk - solvmtx->cblktab;
    solvmtx->reqnum++;

    pastix_atomic_unlock( &(solvmtx->reqlock) );

    (void)rc;
}

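/*
 * Hypothetical usage sketch (not library code): in PaStiX this send is
 * triggered from cpucblk_zrelease_deps() when the contribution counter of a
 * fanin cblk reaches zero. A minimal sketch, assuming fcbk is such a cblk:
 *
 * @code
 * if ( (fcbk->cblktype & CBLK_FANIN) && (fcbk->ctrbcnt == 0) ) {
 *     // Pack the coefficients and post the non-blocking send to the owner
 *     cpucblk_zisend( PastixLUCoef, solvmtx, fcbk );
 * }
 * @endcode
 */
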
/**
 *******************************************************************************
 *
 * @brief Handle a finished request on a fanin
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk is concerned.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[in] solvmtx
 *          The solver matrix structure.
 *
 * @param[inout] cblk
 *          The cblk concerned by the computation.
 *
 *******************************************************************************/
static inline void
cpucblk_zrequest_handle_fanin( pastix_coefside_t   side,
                               const SolverMatrix *solvmtx,
                               SolverCblk         *cblk )
{
    assert( cblk->cblktype & CBLK_FANIN );

#if defined(PASTIX_DEBUG_MPI)
    {
        size_t cblksize = ( side == PastixLUCoef ) ? 2 : 1;
        cblksize = cblksize * cblk_colnbr( cblk ) * cblk->stride * sizeof(pastix_complex64_t);

        fprintf( stderr, "[%2d] Isend for cblk %ld toward %2d ( %ld Bytes ) (DONE)\n",
                 solvmtx->clustnum, (long)cblk->gcblknum, cblk->ownerid, (long)cblksize );
    }
#endif

    if ( cblk->cblktype & CBLK_COMPRESSED ) {
        /* Free the packed buffer */
        if ( side != PastixUCoef ) {
            memFree_null( cblk->lcoeftab );
        }
        else {
            memFree_null( cblk->ucoeftab );
        }
    }
    else {
        /* Free the cblk */
        cpucblk_zfree( side, cblk );
    }

    (void)solvmtx;
}

/**
 *******************************************************************************
 *
 * @brief Handle a finished request on a recv cblk.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk is concerned.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 * @param[in] threadid
 *          Id of the thread calling this method.
 *
 * @param[in] status
 *          Status of the completed MPI reception.
 *
 * @param[inout] recvbuf
 *          Reception buffer that holds the packed cblk.
 *
 *******************************************************************************/
static inline void
cpucblk_zrequest_handle_recv( pastix_coefside_t side,
                              SolverMatrix     *solvmtx,
                              int               threadid,
                              const MPI_Status *status,
                              char             *recvbuf )
{
    SolverCblk *cblk, *fcbk;
    int src = status->MPI_SOURCE;
    int tag = status->MPI_TAG;

    assert( ( 0 <= src ) && ( src < solvmtx->clustnbr ) );
    assert( ( 0 <= tag ) && ( tag < solvmtx->gcblknbr ) );

    /*
     * Let's look for the local cblk
     */
    fcbk = solvmtx->cblktab + solvmtx->gcbl2loc[ tag ];
    cblk = fcbk-1;

    /* Search for the matching source */
    while( cblk->ownerid != src ) {
        cblk--;
        assert( cblk >= solvmtx->cblktab );
        assert( cblk->gcblknum == tag );
        assert( cblk->cblktype & CBLK_RECV );
    }
    assert( fcbk == (solvmtx->cblktab + cblk->fblokptr->fcblknm) );

#if defined(PASTIX_DEBUG_MPI)
    {
        int rc;
        int count = 0;
        pastix_int_t size = (cblk_colnbr(cblk) * cblk->stride) * sizeof(pastix_complex64_t);

        if ( side != PastixLCoef ) {
            size *= 2;
        }

        rc = MPI_Get_count( status, MPI_CHAR, &count );
        assert( rc == MPI_SUCCESS );
        assert( (cblk->cblktype & CBLK_COMPRESSED) ||
                (!(cblk->cblktype & CBLK_COMPRESSED) && (count == size)) );

        /* We can't know the sender easily, so we don't print it */
        fprintf( stderr, "[%2d] Irecv of size %d/%ld for cblk %ld (DONE)\n",
                 solvmtx->clustnum, count, (long)size, (long)cblk->gcblknum );
        (void)rc;
    }
#endif

    /* Initialize the cblk with the reception buffer */
    cblk->threadid = (fcbk->threadid == -1) ? threadid : fcbk->threadid;
    cpucblk_zunpack( side, cblk, recvbuf );

    fcbk = solvmtx->cblktab + cblk->fblokptr->fcblknm;
    cpucblk_zadd( PastixLCoef, 1., cblk, fcbk, &solvmtx->lowrank );

    /* If side is LU, let's add the U part too */
    if ( side != PastixLCoef ) {
        cpucblk_zadd( PastixUCoef, 1., cblk, fcbk, &solvmtx->lowrank );
    }

    /* Reception cblks contribute to themselves */
    cpucblk_zrelease_deps( side, solvmtx, cblk, fcbk );

    /* Free the CBLK_RECV */
    cpucblk_zfree( side, cblk );
}

/**
 *******************************************************************************
 *
 * @brief Handle a finished request.
 *
 * If cblktype & CBLK_FANIN : Will deallocate the coeftab
 * If cblktype & CBLK_RECV  : Will add cblk and deallocate the coeftab
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk is concerned.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 * @param[in] threadid
 *          Id of the thread calling this method.
 *
 * @param[in] outcount
 *          Number of finished requests.
 *
 * @param[in] indexes
 *          Array of indexes of the completed requests.
 *
 * @param[in] statuses
 *          Array of statuses for the completed requests.
 *
 *******************************************************************************
 *
 * @retval The number of requests that are effectively completed and must be
 *         removed from the request array (re-armed receptions are excluded).
 *
 *******************************************************************************/
static inline int
cpucblk_zrequest_handle( pastix_coefside_t side,
                         SolverMatrix     *solvmtx,
                         int               threadid,
                         int               outcount,
                         const int        *indexes,
                         const MPI_Status *statuses )
{
    pastix_int_t i, reqid;
    int nbrequest = outcount;

    for( i = 0; i < outcount; i++ ) {
        reqid = indexes[i];

        /*
         * Handle the reception
         */
        if ( solvmtx->reqidx[reqid] == -1 ) {
            /* We are on a recv cblk: copy the data and restart the communication */
            char *recvbuf;
            MPI_Status status;
            int size;

            memcpy( &status, statuses + i, sizeof(MPI_Status) );
            MPI_Get_count( &status, MPI_CHAR, &size );

            MALLOC_INTERN( recvbuf, size, char );
            memcpy( recvbuf, solvmtx->rcoeftab, size );

            solvmtx->recvcnt--;

            /* Let's restart the communication */
            assert( solvmtx->recvcnt >= 0 );
            if ( solvmtx->recvcnt > 0 ) {
                MPI_Start( solvmtx->reqtab + reqid );
                nbrequest--;
            }
            else {
                MPI_Request_free( solvmtx->reqtab + reqid );
                solvmtx->reqtab[reqid] = MPI_REQUEST_NULL;
            }

            cpucblk_zrequest_handle_recv( side, solvmtx, threadid,
                                          &status, recvbuf );
        }
        /*
         * Handle the emission
         */
        else {
            SolverCblk *cblk = solvmtx->cblktab + solvmtx->reqidx[ reqid ];
            assert( cblk->cblktype & CBLK_FANIN );

            cpucblk_zrequest_handle_fanin( side, solvmtx, cblk );

#if !defined(NDEBUG)
            solvmtx->reqidx[ reqid ] = -1;
#endif
            solvmtx->fanincnt--;
        }
    }

    return nbrequest;
}

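/*
 * The reception path above relies on MPI persistent requests. A minimal
 * standalone sketch of that pattern (assumed names: comm, nbmsg, BUFSIZE,
 * process(); not PaStiX code): the preposted reception is re-armed with
 * MPI_Start() while messages are still expected, and released with
 * MPI_Request_free() after the last one, as done above on solvmtx->reqtab.
 *
 * @code
 * char        buffer[BUFSIZE];
 * MPI_Request req;
 * MPI_Status  status;
 * int         expected = nbmsg;
 *
 * MPI_Recv_init( buffer, BUFSIZE, MPI_CHAR, MPI_ANY_SOURCE,
 *                MPI_ANY_TAG, comm, &req );
 * MPI_Start( &req );
 * while ( expected > 0 ) {
 *     MPI_Wait( &req, &status );
 *     process( buffer, &status );   // copy the data out before re-arming
 *     expected--;
 *     if ( expected > 0 ) {
 *         MPI_Start( &req );        // re-arm the same request
 *     }
 *     else {
 *         MPI_Request_free( &req ); // last message: release the request
 *     }
 * }
 * @endcode
 */
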
/**
 *******************************************************************************
 *
 * @brief Update the request array and the request indexes in a contiguous way.
 *
 *******************************************************************************
 *
 * @param[inout] solvmtx
 *          The solver matrix structure with the updated arrays.
 *
 *******************************************************************************/
void
cpucblk_zupdate_reqtab( SolverMatrix *solvmtx )
{
    /* Pointer to the compressed array of requests */
    MPI_Request  *outrequest = solvmtx->reqtab;
    pastix_int_t *outreqloc  = solvmtx->reqidx;
    int           outreqnbr  = 0;

    /* Pointer to the input array of requests */
    MPI_Request  *inrequest = solvmtx->reqtab;
    pastix_int_t *inreqloc  = solvmtx->reqidx;
    int           inreqnbr  = 0;

    /* Look for the first completed request */
    while( (outreqnbr < solvmtx->reqnum) &&
           (*outrequest != MPI_REQUEST_NULL) )
    {
        outrequest++;
        outreqnbr++;
        outreqloc++;
    }

    inrequest = outrequest;
    inreqloc  = outreqloc;
    inreqnbr  = outreqnbr;
    for( ; inreqnbr < solvmtx->reqnum;
         inreqnbr++, inrequest++, inreqloc++ )
    {
        if ( *inrequest == MPI_REQUEST_NULL )
        {
            continue;
        }

        /* Pack the uncompleted request */
        *outrequest = *inrequest;
        *outreqloc  = *inreqloc;

        /* Move to the next one */
        outrequest++;
        outreqloc++;
        outreqnbr++;
    }

#if !defined(NDEBUG)
    /* Set the remainder of the array to -1 */
    memset( outreqloc, 0xff, (solvmtx->reqnbr - outreqnbr) * sizeof(pastix_int_t) );
#endif

#if defined(PASTIX_DEBUG_MPI)
    int i;
    for( i = outreqnbr; i < solvmtx->reqnbr; i++ )
    {
        solvmtx->reqtab[i] = MPI_REQUEST_NULL;
    }
#endif
    assert( outreqnbr < solvmtx->reqnum );
    solvmtx->reqnum = outreqnbr;
}

/**
 *******************************************************************************
 *
 * @ingroup kernel_fact
 * @brief Progress communications for one process
 *
 * If a communication is completed, it will be treated.
 * If cblktype & CBLK_FANIN : Will deallocate coeftab
 * If cblktype & CBLK_RECV  : Will add cblk to fcblk
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk must be tested.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 * @param[in] threadid
 *          Id of the thread calling this method.
 *
 *******************************************************************************/
void
cpucblk_zmpi_progress( pastix_coefside_t side,
                       SolverMatrix     *solvmtx,
                       int               threadid )
{
    pthread_t  tid = pthread_self();
    int        outcount = 1;
    int        nbrequest, nbfree;
    int        indexes[ solvmtx->reqnbr ];
    MPI_Status statuses[ solvmtx->reqnbr ];

    /* Check if someone is already communicating or not */
    pthread_mutex_lock( &pastix_comm_lock );
    if ( pastix_comm_tid == (pthread_t)-1 ) {
        pastix_comm_tid = tid;
    }
    pthread_mutex_unlock( &pastix_comm_lock );

    if ( tid != pastix_comm_tid ) {
        pastix_yield();
        return;
    }

    /*
     * Let's register the number of active requests.
     * We now suppose that the current thread is working on the first nbrequest
     * active requests in the reqtab array. Additional requests can be posted
     * during this progression, but they will have a larger index. Thus, we do
     * not need to protect every change in these requests.
     * When this is done, the request array is locked to be packed, and the
     * number of requests is updated for the next round.
     */
    pastix_atomic_lock( &(solvmtx->reqlock) );
    nbrequest = solvmtx->reqnum;
    pastix_atomic_unlock( &(solvmtx->reqlock) );

    while( (outcount > 0) && (nbrequest > 0) )
    {
        MPI_Testsome( nbrequest, solvmtx->reqtab, &outcount, indexes, statuses );
        nbfree = 0;

        /* Handle all the completed requests */
        if ( outcount > 0 ) {
            nbfree = cpucblk_zrequest_handle( side, solvmtx, threadid,
                                              outcount, indexes, statuses );
        }

        /*
         * Pack the request arrays, and update the number of active requests by
         * removing the completed ones
         */
        pastix_atomic_lock( &(solvmtx->reqlock) );
        if ( nbfree > 0 ) {
            cpucblk_zupdate_reqtab( solvmtx );
        }
        nbrequest = solvmtx->reqnum;
        pastix_atomic_unlock( &(solvmtx->reqlock) );
    }

    pastix_comm_tid = (pthread_t)-1;
    pastix_yield();
}
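
/*
 * Hypothetical polling sketch (not library code): any worker may call
 * cpucblk_zmpi_progress(); the first caller is elected as the communication
 * thread and drains the completed requests, while the other threads simply
 * yield and return.
 *
 * @code
 * while ( !all_local_work_done ) {              // assumed termination flag
 *     run_one_ready_task( solvmtx, threadid );  // assumed computation step
 *     cpucblk_zmpi_progress( PastixLUCoef, solvmtx, threadid );
 * }
 * @endcode
 */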
#endif /* defined(PASTIX_WITH_MPI) */

/**
 *******************************************************************************
 *
 * @brief Wait for incoming dependencies, and return when cblk->ctrbcnt has
 *        reached 0.
 *
 *******************************************************************************
 *
 * @param[in] rank
 *          The rank of the current thread.
 *
 * @param[in] side
 *          Define which side of the cblk must be released.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 * @param[inout] cblk
 *          The column block that contributes to fcblk.
 *
 *******************************************************************************
 *
 * @return 1 if the cblk is a fanin, 0 otherwise
 *
 *******************************************************************************/
int
cpucblk_zincoming_deps( int               rank,
                        pastix_coefside_t side,
                        SolverMatrix     *solvmtx,
                        SolverCblk       *cblk )
{
#if defined(PASTIX_WITH_MPI)
    if ( cblk->cblktype & CBLK_FANIN ) {
        /*
         * We are in the sequential case: progress on the communications and
         * return if nothing is completed.
         */
        //cpucblk_ztestsome( side, solvmtx );
        return 1;
    }

    if ( cblk->cblktype & CBLK_RECV ) {
        return 1;
    }

    /* Make sure we receive every contribution */
    while( cblk->ctrbcnt > 0 ) {
        cpucblk_zmpi_progress( side, solvmtx, rank );
    }
#else
    assert( !(cblk->cblktype & (CBLK_FANIN | CBLK_RECV)) );
    do { pastix_yield(); } while( cblk->ctrbcnt > 0 );
#endif

    (void)rank;
    (void)side;
    (void)solvmtx;

    return 0;
}

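/*
 * Hypothetical scheduler sketch (not library code): before factorizing a
 * cblk, a worker blocks until all its contributions have arrived. Fanin and
 * recv cblks return 1 and are skipped, since they are handled by the
 * communication engine.
 *
 * @code
 * if ( cpucblk_zincoming_deps( rank, PastixLUCoef, solvmtx, cblk ) == 0 ) {
 *     // ctrbcnt reached 0: the cblk is ready to be factorized
 *     factorize_cblk( solvmtx, cblk );   // assumed kernel call
 * }
 * @endcode
 */
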
/**
 *******************************************************************************
 *
 * @brief Release the dependencies of the given cblk after an update.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk must be released.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 * @param[in] cblk
 *          The column block that contributes to fcblk.
 *
 * @param[inout] fcbk
 *          The facing column block that is updated by cblk.
 *
 *******************************************************************************/
void
cpucblk_zrelease_deps( pastix_coefside_t side,
                       SolverMatrix     *solvmtx,
                       const SolverCblk *cblk,
                       SolverCblk       *fcbk )
{
    int32_t ctrbcnt;
    ctrbcnt = pastix_atomic_dec_32b( &(fcbk->ctrbcnt) );
    if ( !ctrbcnt ) {
#if defined(PASTIX_WITH_MPI)
        if ( fcbk->cblktype & CBLK_FANIN ) {
            cpucblk_zisend( side, solvmtx, fcbk );
            return;
        }
#else
        (void)side;
#endif
        if ( solvmtx->computeQueue ) {
            pastix_queue_t *queue = solvmtx->computeQueue[ cblk->threadid ];
            pqueuePush1( queue, fcbk - solvmtx->cblktab, queue->size );
        }
    }
}

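/*
 * Hypothetical update sketch (not library code): after accumulating the
 * contribution of cblk into fcbk, the dependency is released. This either
 * pushes fcbk into a compute queue, or posts its emission if it is a fanin.
 *
 * @code
 * apply_update( solvmtx, cblk, fcbk );   // assumed update kernel
 * cpucblk_zrelease_deps( PastixLUCoef, solvmtx, cblk, fcbk );
 * @endcode
 */
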
/**
 *******************************************************************************
 *
 * @brief Waitall routine for the current cblk requests.
 *
 * It may be possible that some cblks will not be deallocated with the static
 * scheduler, so a cleanup may be necessary.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Define which side of the cblk must be tested.
 *          @arg PastixLCoef if lower part only
 *          @arg PastixUCoef if upper part only
 *          @arg PastixLUCoef if both sides.
 *
 * @param[in] sched
 *          Define which scheduler is used:
 *          @arg PastixSchedSequential if sequential
 *          @arg PastixSchedStatic if multi-threaded static scheduler
 *          @arg PastixSchedDynamic if multi-threaded dynamic scheduler
 *          No other scheduler is supported.
 *
 * @param[inout] solvmtx
 *          The solver matrix structure.
 *
 *******************************************************************************/
void
cpucblk_zrequest_cleanup( pastix_coefside_t side,
                          pastix_int_t      sched,
                          SolverMatrix     *solvmtx )
{
    if ( (sched != PastixSchedSequential) &&
         (sched != PastixSchedStatic)     &&
         (sched != PastixSchedDynamic) )
    {
        return;
    }
#if defined(PASTIX_WITH_MPI)
    pastix_int_t i;
    int rc;
    SolverCblk *cblk;
    int reqnbr = solvmtx->reqnum;
    MPI_Status status;

#if defined(PASTIX_DEBUG_MPI)
    fprintf( stderr, "[%2d] Wait for all pending communications\n",
             solvmtx->clustnum );
#endif

    for( i=0; i<reqnbr; i++ )
    {
        if ( solvmtx->reqtab[i] == MPI_REQUEST_NULL ) {
            assert( 0 /* MPI_REQUEST_NULL should have been pushed to the end */ );
            solvmtx->reqnum--;
            continue;
        }

        /* Make sure that we don't have an already cleaned request in dynamic */
        assert( solvmtx->reqidx[i] != -1 );

        rc = MPI_Wait( solvmtx->reqtab + i, &status );
        assert( rc == MPI_SUCCESS );

        cblk = solvmtx->cblktab + solvmtx->reqidx[i];

        /* We should wait only for fanin */
        assert( cblk->cblktype & CBLK_FANIN );

        cpucblk_zrequest_handle_fanin( side, solvmtx, cblk );

        solvmtx->reqnum--;
    }
    assert( solvmtx->reqnum == 0 );
    (void)rc;
#else
    (void)side;
    (void)solvmtx;
#endif
}