PaStiX Handbook 6.4.0
cpucblk_cmpi_coeftab.c
1/**
2 *
3 * @file cpucblk_cmpi_coeftab.c
4 *
5 * Precision-dependent routines to send and receive the coeftab of the cblks.
6 *
7 * @copyright 2015-2024 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
8 * Univ. Bordeaux. All rights reserved.
9 *
10 * @version 6.4.0
11 * @author Pierre Ramet
12 * @author Mathieu Faverge
13 * @author Tony Delarue
14 * @author Nolan Bredel
15 * @author Alycia Lisito
16 * @author Florent Pruvost
17 * @date 2024-07-05
18 *
19 * @generated from /builds/2mk6rsew/0/solverstack/pastix/kernels/cpucblk_zmpi_coeftab.c, normal z -> c, Tue Feb 25 14:34:59 2025
20 *
21 **/
22#include "common/common.h"
23#include "blend/solver.h"
25#include "kernels.h"
26#include "pastix_ccores.h"
27#include "pastix_clrcores.h"
28#include "cpucblk_cpack.h"
29#include <lapacke.h>
30
31#if defined( PASTIX_WITH_MPI )
32/**
33 *******************************************************************************
34 *
35 * @brief Asynchronously send a cblk to cblk->ownerid
36 *
37 *******************************************************************************
38 *
39 * @param[in] side
40 * Define which side of the cblk must be sent.
41 * @arg PastixLCoef if lower part only
42 * @arg PastixUCoef if upper part only
43 * @arg PastixLUCoef if both sides.
44 *
45 * @param[inout] solvmtx
46 * The solver matrix structure.
47 *
48 * @param[in] cblk
49 * The column block that will be sent.
50 *
51 *******************************************************************************/
52void
53cpucblk_cisend( pastix_coefside_t side,
54 SolverMatrix *solvmtx,
55 SolverCblk *cblk )
56{
57 MPI_Request request;
58 size_t bufsize;
59 void *buffer;
60 int rc;
61
62 assert( cblk->cblktype & CBLK_FANIN );
63
64 bufsize = cpucblk_ccompute_size( side, cblk );
65 buffer = cpucblk_cpack( side, cblk, bufsize );
66
67 /* Back up the buffer pointer when the data is packed */
68 if ( cblk->cblktype & CBLK_COMPRESSED ) {
69 cpucblk_cfree( side, cblk );
70 if ( side != PastixUCoef ) {
71 cblk->lcoeftab = buffer;
72 }
73 else {
74 cblk->ucoeftab = buffer;
75 }
76 }
77
78#if defined(PASTIX_DEBUG_MPI)
79 fprintf( stderr, "[%2d] Post Isend for cblk %ld toward %2d ( %ld Bytes )\n",
80 solvmtx->clustnum, (long)cblk->gcblknum, cblk->ownerid, (long)bufsize );
81#endif
82
83 rc = MPI_Isend( buffer, bufsize, MPI_CHAR,
84 cblk->ownerid, cblk->gcblknum, solvmtx->solv_comm, &request );
85 assert( rc == MPI_SUCCESS );
86
87 solverCommMatrixAdd( solvmtx, cblk->ownerid, bufsize );
88
89 /* Register the request to make it progress */
90 pastix_atomic_lock( &(solvmtx->reqlock) );
91
92 assert( solvmtx->reqidx[ solvmtx->reqnum ] == -1 );
93 assert( solvmtx->reqnum >= 0 );
94 assert( solvmtx->reqnum < solvmtx->reqnbr );
95
96 solvmtx->reqtab[ solvmtx->reqnum ] = request;
97 solvmtx->reqidx[ solvmtx->reqnum ] = cblk - solvmtx->cblktab;
98 solvmtx->reqnum++;
99
100 pastix_atomic_unlock( &(solvmtx->reqlock) );
101
102 (void)rc;
103}
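/*
 * Illustrative sketch (not part of the PaStiX sources): the pack-and-Isend
 * pattern used by cpucblk_cisend() above, reduced to plain MPI + pthreads.
 * The names post_send, reqtab, reqnum and lock are hypothetical stand-ins
 * for the corresponding solvmtx fields.
 *
 * @code
 * #include <mpi.h>
 * #include <pthread.h>
 *
 * void post_send( void *packed, size_t size, int dest, int tag,
 *                 MPI_Comm comm, MPI_Request *reqtab, int *reqnum,
 *                 pthread_mutex_t *lock )
 * {
 *     MPI_Request request;
 *
 *     // Post the non-blocking send of the packed buffer
 *     MPI_Isend( packed, (int)size, MPI_CHAR, dest, tag, comm, &request );
 *
 *     // Register the request so a progression loop can complete it later
 *     pthread_mutex_lock( lock );
 *     reqtab[ (*reqnum)++ ] = request;
 *     pthread_mutex_unlock( lock );
 * }
 * @endcode
 */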
104
105/**
106 *******************************************************************************
107 *
108 * @brief Handle a finished request on a fanin
109 *
110 *******************************************************************************
111 *
112 * @param[in] side
113 * Define which side of the cblk is concerned.
114 * @arg PastixLCoef if lower part only
115 * @arg PastixUCoef if upper part only
116 * @arg PastixLUCoef if both sides.
117 *
118 * @param[in] solvmtx
119 * The solver matrix structure.
120 *
121 * @param[inout] cblk
122 * The cblk concerned by the computation.
123 *
124 *******************************************************************************/
125static inline void
126cpucblk_crequest_handle_fanin( pastix_coefside_t side,
127 const SolverMatrix *solvmtx,
128 SolverCblk *cblk )
129{
130 assert( cblk->cblktype & CBLK_FANIN );
131
132#if defined(PASTIX_DEBUG_MPI)
133 {
134 size_t cblksize = ( side == PastixLUCoef ) ? 2 : 1;
135 cblksize = cblksize * cblk_colnbr( cblk ) * cblk->stride * sizeof(pastix_complex32_t);
136
137 fprintf( stderr, "[%2d] Isend for cblk %ld toward %2d ( %ld Bytes ) (DONE)\n",
138 solvmtx->clustnum, (long)cblk->gcblknum, cblk->ownerid, (long)cblksize );
139 }
140#endif
141
142 if ( cblk->cblktype & CBLK_COMPRESSED ) {
143 /* Free the packed buffer */
144 if ( side != PastixUCoef ) {
145 memFree_null( cblk->lcoeftab );
146 }
147 else {
148 memFree_null( cblk->ucoeftab );
149 }
150 }
151 else {
152 /* Free the cblk */
153 cpucblk_cfree( side, cblk );
154 }
155
156 (void)solvmtx;
157}
158
159/**
160 *******************************************************************************
161 *
162 * @brief Handle a finished request on a recv cblk.
163 *
164 *******************************************************************************
165 *
166 * @param[in] side
167 * Define which side of the cblk is concerned.
168 * @arg PastixLCoef if lower part only
169 * @arg PastixUCoef if upper part only
170 * @arg PastixLUCoef if both sides.
171 *
172 * @param[inout] solvmtx
173 * The solver matrix structure.
174 *
175 * @param[in] threadid
176 * Id of the thread calling this method.
177 *
178 * @param[in] status
179 * The MPI status of the completed reception (source and tag of the message).
180 *
181 * @param[inout] recvbuf
182 * The reception buffer that contains the packed data of the cblk.
183 *
184 *******************************************************************************/
185static inline void
186cpucblk_crequest_handle_recv( pastix_coefside_t side,
187 SolverMatrix *solvmtx,
188 int threadid,
189 const MPI_Status *status,
190 char *recvbuf )
191{
192 SolverCblk *cblk, *fcbk;
193 int src = status->MPI_SOURCE;
194 int tag = status->MPI_TAG;
195
196 assert( ( 0 <= src ) && ( src < solvmtx->clustnbr ) );
197 assert( ( 0 <= tag ) && ( tag < solvmtx->gcblknbr ) );
198
199 /*
200 * Let's look for the local cblk
201 */
202 fcbk = solvmtx->cblktab + solvmtx->gcbl2loc[ tag ];
203 cblk = fcbk-1;
204
205 /* Walk back through the recv cblks to find the one matching the source */
206 while( cblk->ownerid != src ) {
207 cblk--;
208 assert( cblk >= solvmtx->cblktab );
209 assert( cblk->gcblknum == tag );
210 assert( cblk->cblktype & CBLK_RECV );
211 }
212 assert( fcbk == (solvmtx->cblktab + cblk->fblokptr->fcblknm) );
213
214#if defined(PASTIX_DEBUG_MPI)
215 {
216 int rc;
217 int count = 0;
218 pastix_int_t size = (cblk_colnbr(cblk) * cblk->stride) * sizeof(pastix_complex32_t);
219
220 if ( side != PastixLCoef ) {
221 size *= 2;
222 }
223
224 rc = MPI_Get_count( status, MPI_CHAR, &count );
225 assert( rc == MPI_SUCCESS );
226 assert( (cblk->cblktype & CBLK_COMPRESSED) ||
227 (!(cblk->cblktype & CBLK_COMPRESSED) && (count == size)) );
228
229 /* We can't know the sender easily, so we don't print it */
230 fprintf( stderr, "[%2d] Irecv of size %d/%ld for cblk %ld (DONE)\n",
231 solvmtx->clustnum, count, (long)size, (long)cblk->gcblknum );
232 (void)rc;
233 }
234#endif
235
236 /* Initialize the cblk with the reception buffer */
237 cblk->threadid = (fcbk->threadid == -1) ? threadid : fcbk->threadid;
238 cpucblk_cunpack( side, cblk, recvbuf );
239
240 fcbk = solvmtx->cblktab + cblk->fblokptr->fcblknm;
241 cpucblk_cadd( 1., cblk, fcbk, cblk_getdataL( cblk ), cblk_getdataL( fcbk ), NULL, 0, &solvmtx->lowrank );
242
243 /* If side is LU, let's add the U part too */
244 if ( side != PastixLCoef ) {
245 cpucblk_cadd( 1., cblk, fcbk, cblk_getdataU( cblk ), cblk_getdataU( fcbk ), NULL, 0, &solvmtx->lowrank );
246 }
247
248 /* Reception cblks contribute to themselves */
249 cpucblk_crelease_deps( side, solvmtx, cblk, fcbk );
250
251 /* Free the CBLK_RECV */
252 cpucblk_cfree( side, cblk );
253}
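/*
 * Illustrative sketch (not part of the PaStiX sources): retrieving the size
 * of a completed reception with MPI_Get_count, as the debug block above does
 * to validate the full-rank message size.
 *
 * @code
 * #include <assert.h>
 * #include <mpi.h>
 *
 * int received_bytes( const MPI_Status *status )
 * {
 *     int count = 0;
 *     int rc = MPI_Get_count( status, MPI_CHAR, &count );
 *     assert( rc == MPI_SUCCESS );
 *     return count; // number of MPI_CHAR elements in the message
 * }
 * @endcode
 */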
254
255/**
256 *******************************************************************************
257 *
258 * @brief Handle a finished request.
259 *
260 * If cblktype & CBLK_FANIN : Will deallocate the coeftab
261 * If cblktype & CBLK_RECV : Will add cblk and deallocate the coeftab
262 *
263 *******************************************************************************
264 *
265 * @param[in] side
266 * Define which side of the cblk are concerned.
267 * @arg PastixLCoef if lower part only
268 * @arg PastixUCoef if upper part only
269 * @arg PastixLUCoef if both sides.
270 *
271 * @param[inout] solvmtx
272 * The solver matrix structure.
273 *
274 * @param[in] threadid
275 * Id of the thread calling this method.
276 *
277 * @param[in] outcount
278 * Number of finished requests
279 *
280 * @param[in] indexes
281 * Array of indexes of the completed requests
282 *
283 * @param[in] statuses
284 * Array of statuses for the completed requests
285 *
286 *******************************************************************************
287 *
288 * @return The number of completed requests removed from the request table (restarted receptions are not counted).
289 *
290 *******************************************************************************/
291static inline int
292cpucblk_crequest_handle( pastix_coefside_t side,
293 SolverMatrix *solvmtx,
294 int threadid,
295 int outcount,
296 const int *indexes,
297 const MPI_Status *statuses )
298{
299 pastix_int_t i, reqid;
300 int nbrequest = outcount;
301
302 for( i = 0; i < outcount; i++ ){
303 reqid = indexes[i];
304
305 /*
306 * Handle the reception
307 */
308 if ( solvmtx->reqidx[reqid] == -1 ) {
309 /* We are on a recv cblk: copy the data and restart the communication */
310 char *recvbuf;
311 MPI_Status status;
312 int size;
313
314 memcpy( &status, statuses + i, sizeof(MPI_Status) );
315 MPI_Get_count( &status, MPI_CHAR, &size );
316
317 MALLOC_INTERN( recvbuf, size, char );
318 memcpy( recvbuf, solvmtx->rcoeftab, size );
319
320 solvmtx->recvcnt--;
321
322 /* Let's restart the communication */
323 assert( solvmtx->recvcnt >= 0 );
324 if ( solvmtx->recvcnt > 0 ) {
325 MPI_Start( solvmtx->reqtab + reqid );
326 nbrequest--;
327 }
328 else {
329 MPI_Request_free( solvmtx->reqtab + reqid );
330 solvmtx->reqtab[reqid] = MPI_REQUEST_NULL;
331 }
332
333 cpucblk_crequest_handle_recv( side, solvmtx, threadid,
334 &status, recvbuf );
335 }
336 /*
337 * Handle the emission
338 */
339 else {
340 SolverCblk *cblk = solvmtx->cblktab + solvmtx->reqidx[ reqid ];
341 assert( cblk->cblktype & CBLK_FANIN );
342
343 cpucblk_crequest_handle_fanin( side, solvmtx, cblk );
344
345#if !defined(NDEBUG)
346 solvmtx->reqidx[ reqid ] = -1;
347#endif
348 solvmtx->fanincnt--;
349 }
350 }
351
352 return nbrequest;
353}
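/*
 * Illustrative sketch (not part of the PaStiX sources): the persistent
 * reception that cpucblk_crequest_handle() restarts with MPI_Start(), shown
 * in isolation. One buffer is bound once with MPI_Recv_init(), restarted
 * while messages are still expected, and released at the end.
 *
 * @code
 * #include <mpi.h>
 *
 * void recv_expected( void *buf, int size, MPI_Comm comm, int expected )
 * {
 *     MPI_Request req;
 *     MPI_Status status;
 *
 *     MPI_Recv_init( buf, size, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG,
 *                    comm, &req );
 *     MPI_Start( &req );
 *     while ( expected > 0 ) {
 *         MPI_Wait( &req, &status );
 *         expected--;
 *         // consume buf here before restarting the communication
 *         if ( expected > 0 ) {
 *             MPI_Start( &req );
 *         }
 *     }
 *     MPI_Request_free( &req );
 * }
 * @endcode
 */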
354
355/**
356 *******************************************************************************
357 *
358 * @brief Update the request array and the request indexes in a contiguous way.
359 *
360 *******************************************************************************
361 *
362 * @param[inout] solvmtx
363 * The solver matrix structure with the updated arrays.
364 *
365 *******************************************************************************/
366void
367cpucblk_cupdate_reqtab( SolverMatrix *solvmtx )
368{
369 /* Pointers to the compacted request arrays */
370 MPI_Request *outrequest = solvmtx->reqtab;
371 pastix_int_t *outreqloc = solvmtx->reqidx;
372 int outreqnbr = 0;
373
374 /* Pointers to the input request arrays */
375 MPI_Request *inrequest = solvmtx->reqtab;
376 pastix_int_t *inreqloc = solvmtx->reqidx;
377 int inreqnbr = 0;
378
379 /* Look for the first completed request */
380 while( (outreqnbr < solvmtx->reqnum) &&
381 (*outrequest != MPI_REQUEST_NULL) )
382 {
383 outrequest++;
384 outreqnbr++;
385 outreqloc++;
386 }
387
388 inrequest = outrequest;
389 inreqloc = outreqloc;
390 inreqnbr = outreqnbr;
391 for( ; inreqnbr < solvmtx->reqnum;
392 inreqnbr++, inrequest++, inreqloc++ )
393 {
394 if ( *inrequest == MPI_REQUEST_NULL )
395 {
396 continue;
397 }
398
399 /* Pack the uncompleted request */
400 *outrequest = *inrequest;
401 *outreqloc = *inreqloc;
402
403 /* Move to the next one */
404 outrequest++;
405 outreqloc++;
406 outreqnbr++;
407 }
408
409#if !defined(NDEBUG)
410 /* Set the remaining entries of the array to -1 */
411 memset( outreqloc, 0xff, (solvmtx->reqnbr - outreqnbr) * sizeof(pastix_int_t) );
412#endif
413
414#if defined(PASTIX_DEBUG_MPI)
415 int i;
416 for( i = outreqnbr; i < solvmtx->reqnbr; i++ )
417 {
418 solvmtx->reqtab[i] = MPI_REQUEST_NULL;
419 }
420#endif
421 assert( outreqnbr < solvmtx->reqnum );
422 solvmtx->reqnum = outreqnbr;
423}
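/*
 * Illustrative sketch (not part of the PaStiX sources): the same two-pointer,
 * in-place compaction applied to plain arrays, keeping the active requests at
 * the front of the table.
 *
 * @code
 * #include <mpi.h>
 *
 * // Returns the new number of active requests.
 * int compact_requests( MPI_Request *reqtab, int *reqidx, int reqnum )
 * {
 *     int in, out = 0;
 *     for ( in = 0; in < reqnum; in++ ) {
 *         if ( reqtab[in] != MPI_REQUEST_NULL ) {
 *             reqtab[out] = reqtab[in];
 *             reqidx[out] = reqidx[in];
 *             out++;
 *         }
 *     }
 *     return out;
 * }
 * @endcode
 */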
424
425/**
426 *******************************************************************************
427 *
428 * @ingroup kernel_fact
429 * @brief Progress communications for one process
430 *
431 * If a communication is completed, it will be handled.
432 * If cblktype & CBLK_FANIN : Will deallocate coeftab
433 * If cblktype & CBLK_RECV : Will add cblk to fcblk
434 *
435 *******************************************************************************
436 *
437 * @param[in] side
438 * Define which side of the cblk must be tested.
439 * @arg PastixLCoef if lower part only
440 * @arg PastixUCoef if upper part only
441 * @arg PastixLUCoef if both sides.
442 *
443 * @param[inout] solvmtx
444 * The solver matrix structure.
445 *
446 * @param[in] threadid
447 * Id of the thread calling this method.
448 *
449 *******************************************************************************/
450void
451cpucblk_cmpi_progress( pastix_coefside_t side,
452 SolverMatrix *solvmtx,
453 int threadid )
454{
455 pthread_t tid = pthread_self();
456 int outcount = 1;
457 int nbrequest, nbfree;
458 int indexes[ solvmtx->reqnbr ];
459 MPI_Status statuses[ solvmtx->reqnbr ];
460
461 /* Check if someone is already communicating or not */
462 pthread_mutex_lock( &pastix_comm_lock );
463 if ( pastix_comm_tid == (pthread_t)-1 ) {
464 pastix_comm_tid = tid;
465 }
466 pthread_mutex_unlock( &pastix_comm_lock );
467
468 if ( tid != pastix_comm_tid ) {
469 pastix_yield();
470 return;
471 }
472
473 /*
474 * Let's register the number of active requests.
475 * We now suppose that the current thread is working on the first
476 * nbrequest active requests in the reqtab array. Additional requests can
477 * be posted during this progression, but they will get larger indexes;
478 * thus, we do not need to protect every change to these requests.
479 * When this is done, the request arrays are locked to be packed, and the
480 * number of requests is updated for the next round.
481 */
482 pastix_atomic_lock( &(solvmtx->reqlock) );
483 nbrequest = solvmtx->reqnum;
484 pastix_atomic_unlock( &(solvmtx->reqlock) );
485
486 while( (outcount > 0) && (nbrequest > 0) )
487 {
488 MPI_Testsome( nbrequest, solvmtx->reqtab, &outcount, indexes, statuses );
489 nbfree = 0;
490
491 /* Handle all the completed requests */
492 if ( outcount > 0 ) {
493 nbfree = cpucblk_crequest_handle( side, solvmtx, threadid,
494 outcount, indexes, statuses );
495 }
496
497 /*
498 * Pack the request arrays, and update the number of active requests by
499 * removing the completed ones
500 */
501 pastix_atomic_lock( &(solvmtx->reqlock) );
502 if ( nbfree > 0 ) {
503 cpucblk_cupdate_reqtab( solvmtx );
504 }
505 nbrequest = solvmtx->reqnum;
506 pastix_atomic_unlock( &(solvmtx->reqlock) );
507 }
508
509 pastix_comm_tid = (pthread_t)-1;
510 pastix_yield();
511}
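/*
 * Illustrative sketch (not part of the PaStiX sources): one MPI_Testsome()
 * round of the progression loop above. Completed non-persistent requests are
 * set to MPI_REQUEST_NULL by MPI, which is what the compaction step relies on.
 *
 * @code
 * #include <mpi.h>
 *
 * // Returns the number of requests completed in this round.
 * int progress_once( MPI_Request *reqtab, int reqnum,
 *                    int *indexes, MPI_Status *statuses )
 * {
 *     int outcount = 0;
 *     MPI_Testsome( reqnum, reqtab, &outcount, indexes, statuses );
 *     if ( outcount == MPI_UNDEFINED ) {
 *         return 0; // no active request left in the table
 *     }
 *     return outcount;
 * }
 * @endcode
 */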
512#endif /* defined(PASTIX_WITH_MPI) */
513
514/**
515 *******************************************************************************
516 *
517 * @brief Wait for incoming dependencies, and return when cblk->ctrbcnt has
518 * reached 0.
519 *
520 *******************************************************************************
521 *
522 * @param[in] rank
523 * The rank of the current thread.
524 *
525 * @param[in] side
526 * Define which side of the cblk must be released.
527 * @arg PastixLCoef if lower part only
528 * @arg PastixUCoef if upper part only
529 * @arg PastixLUCoef if both sides.
530 *
531 * @param[inout] solvmtx
532 * The solver matrix structure.
533 *
534 * @param[inout] cblk
535 * The column block that contributes to fcblk.
536 *
537 *******************************************************************************
538 *
539 * @return 1 if the cblk is a fanin, 0 otherwise
540 *
541 *******************************************************************************/
542int
543cpucblk_cincoming_deps( int rank,
544 pastix_coefside_t side,
545 SolverMatrix *solvmtx,
546 SolverCblk *cblk )
546 SolverCblk *cblk )
547{
548#if defined(PASTIX_WITH_MPI)
549 if ( cblk->cblktype & CBLK_FANIN ) {
550 /*
551 * In the sequential case, we would progress on communications here and
552 * return if nothing has completed.
553 */
554 //cpucblk_ctestsome( side, solvmtx );
555 return 1;
556 }
557
558 if ( cblk->cblktype & CBLK_RECV ) {
559 return 1;
560 }
561
562 /* Make sure we receive every contribution */
563 while( cblk->ctrbcnt > 0 ) {
564 cpucblk_cmpi_progress( side, solvmtx, rank );
565 }
566#else
567 assert( !(cblk->cblktype & (CBLK_FANIN | CBLK_RECV)) );
568 do { pastix_yield(); } while( cblk->ctrbcnt > 0 );
569#endif
570
571 (void)rank;
572 (void)side;
573 (void)solvmtx;
574
575 return 0;
576}
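/*
 * Illustrative sketch (not part of the PaStiX sources): the shared-memory
 * equivalent of the wait loop above, expressed with C11 atomics and a
 * cooperative yield.
 *
 * @code
 * #include <stdatomic.h>
 * #include <sched.h>
 *
 * void wait_contributions( _Atomic int *ctrbcnt )
 * {
 *     while ( atomic_load( ctrbcnt ) > 0 ) {
 *         sched_yield(); // let the contributing threads run
 *     }
 * }
 * @endcode
 */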
577
578/**
579 *******************************************************************************
580 *
581 * @brief Release the dependencies of the given cblk after an update.
582 *
583 *******************************************************************************
584 *
585 * @param[in] side
586 * Define which side of the cblk must be released.
587 * @arg PastixLCoef if lower part only
588 * @arg PastixUCoef if upper part only
589 * @arg PastixLUCoef if both sides.
590 *
591 * @param[inout] solvmtx
592 * The solver matrix structure.
593 *
594 * @param[in] cblk
595 * The column block that contributes to fcblk.
596 *
597 * @param[inout] fcbk
598 * The facing column block that is updated by cblk.
599 *
600 *******************************************************************************/
601void
602cpucblk_crelease_deps( pastix_coefside_t side,
603 SolverMatrix *solvmtx,
604 const SolverCblk *cblk,
605 SolverCblk *fcbk )
606{
607 int32_t ctrbcnt;
608 ctrbcnt = pastix_atomic_dec_32b( &(fcbk->ctrbcnt) );
609 if ( !ctrbcnt ) {
610#if defined(PASTIX_WITH_MPI)
611 if ( fcbk->cblktype & CBLK_FANIN ) {
612 cpucblk_cisend( side, solvmtx, fcbk );
613 return;
614 }
615#else
616 (void)side;
617#endif
618 if ( solvmtx->computeQueue ) {
619 pastix_queue_t *queue = solvmtx->computeQueue[ cblk->threadid ];
620 assert( fcbk->priority != -1 );
621 pqueuePush1( queue, fcbk - solvmtx->cblktab, fcbk->priority );
622 }
623 }
624}
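/*
 * Illustrative sketch (not part of the PaStiX sources): the decrement-and-test
 * idiom of cpucblk_crelease_deps() with C11 atomics. Exactly one caller sees
 * the counter reach zero and gets to schedule the target task; schedule() is a
 * hypothetical stand-in for pqueuePush1().
 *
 * @code
 * #include <stdatomic.h>
 *
 * void release_dep( _Atomic int *ctrbcnt, void (*schedule)(void) )
 * {
 *     // atomic_fetch_sub returns the previous value, so a result of 1
 *     // means this call performed the last decrement.
 *     if ( atomic_fetch_sub( ctrbcnt, 1 ) == 1 ) {
 *         schedule();
 *     }
 * }
 * @endcode
 */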
625
626/**
627 *******************************************************************************
628 *
629 * @brief Waitall routine for current cblk request
630 *
631 * With the static scheduler, some cblks may not have been deallocated,
632 * so a cleanup may be necessary.
633 *
634 *******************************************************************************
635 *
636 * @param[in] side
637 * Define which side of the cblk must be tested.
638 * @arg PastixLCoef if lower part only
639 * @arg PastixUCoef if upper part only
640 * @arg PastixLUCoef if both sides.
641 *
642 * @param[in] sched
643 * Define which scheduler is used:
644 * @arg PastixSchedSequential if sequential
645 * @arg PastixSchedStatic if multi-threaded static scheduler
646 * @arg PastixSchedDynamic if multi-threaded dynamic scheduler
647 * No other scheduler is supported.
648 *
649 * @param[inout] solvmtx
650 * The solver matrix structure.
651 *
652 *******************************************************************************/
653void
654cpucblk_crequest_cleanup( pastix_coefside_t side,
655 pastix_int_t sched,
656 SolverMatrix *solvmtx )
657{
658 if ( (sched != PastixSchedSequential) &&
659 (sched != PastixSchedStatic) &&
660 (sched != PastixSchedDynamic) )
661 {
662 return;
663 }
664#if defined(PASTIX_WITH_MPI)
665 pastix_int_t i;
666 int rc;
667 SolverCblk *cblk;
668 int reqnbr = solvmtx->reqnum;
669 MPI_Status status;
670
671#if defined(PASTIX_DEBUG_MPI)
672 fprintf( stderr, "[%2d] Wait for all pending communications\n",
673 solvmtx->clustnum );
674#endif
675
676 for( i=0; i<reqnbr; i++ )
677 {
678 if ( solvmtx->reqtab[i] == MPI_REQUEST_NULL ) {
679 assert( 0 /* MPI_REQUEST_NULL should have been pushed to the end */ );
680 solvmtx->reqnum--;
681 continue;
682 }
683
684 /* Make sure that we don't have an already cleaned request in dynamic */
685 assert( solvmtx->reqidx[i] != -1 );
686
687 rc = MPI_Wait( solvmtx->reqtab + i, &status );
688 assert( rc == MPI_SUCCESS );
689
690 cblk = solvmtx->cblktab + solvmtx->reqidx[i];
691
692 /* We should wait only for fanin */
693 assert( cblk->cblktype & CBLK_FANIN );
694
695 cpucblk_crequest_handle_fanin( side, solvmtx, cblk );
696
697 solvmtx->reqnum--;
698 }
699 assert( solvmtx->reqnum == 0 );
700 (void)rc;
701#else
702 (void)side;
703 (void)solvmtx;
704#endif
705}
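/*
 * Illustrative sketch (not part of the PaStiX sources): when no per-request
 * handling is needed, the final drain above collapses to a single call.
 *
 * @code
 * #include <mpi.h>
 *
 * void drain_all( MPI_Request *reqtab, int reqnum )
 * {
 *     // The completion statuses are not inspected here
 *     MPI_Waitall( reqnum, reqtab, MPI_STATUSES_IGNORE );
 * }
 * @endcode
 */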
size_t cpucblk_ccompute_size(pastix_coefside_t side, const SolverCblk *cblk)
Compute the size of the buffer to send.
void cpucblk_cunpack(pastix_coefside_t side, SolverCblk *cblk, void *buffer)
Unpack data and fill the column block concerned by the computation.
void * cpucblk_cpack(pastix_coefside_t side, SolverCblk *cblk, size_t size)
Pack a column block (Full rank or low rank).
typedef int pastix_int_t
Definition datatypes.h:51
float _Complex pastix_complex32_t
Definition datatypes.h:76
static void pqueuePush1(pastix_queue_t *q, pastix_int_t elt, double key1)
Push an element with a single key.
Definition queue.h:64
pastix_queue_t
Queue structure.
Definition queue.h:38
void cpucblk_crequest_cleanup(pastix_coefside_t side, pastix_int_t sched, SolverMatrix *solvmtx)
Waitall routine for current cblk request.
void cpucblk_crelease_deps(pastix_coefside_t side, SolverMatrix *solvmtx, const SolverCblk *cblk, SolverCblk *fcbk)
Release the dependencies of the given cblk after an update.
pastix_fixdbl_t cpucblk_cadd(pastix_complex32_t alpha, const SolverCblk *cblkA, SolverCblk *cblkB, const void *A, void *B, pastix_complex32_t *work, pastix_int_t lwork, const pastix_lr_t *lowrank)
Add two column blocks in full rank format.
void cpucblk_cfree(pastix_coefside_t side, SolverCblk *cblk)
Free the cblk structure that store the coefficient.
int cpucblk_cincoming_deps(int rank, pastix_coefside_t side, SolverMatrix *solvmtx, SolverCblk *cblk)
Wait for incoming dependencies, and return when cblk->ctrbcnt has reached 0.
enum pastix_coefside_e pastix_coefside_t
Data blocks used in the kernel.
@ PastixLCoef
Definition api.h:478
@ PastixLUCoef
Definition api.h:480
@ PastixUCoef
Definition api.h:479
@ PastixSchedStatic
Definition api.h:335
@ PastixSchedDynamic
Definition api.h:338
@ PastixSchedSequential
Definition api.h:334
pastix_int_t reqnbr
Definition solver.h:271
pastix_atomic_lock_t reqlock
Definition solver.h:273
void * ucoeftab
Definition solver.h:178
pastix_lr_t lowrank
Definition solver.h:236
pastix_int_t priority
Definition solver.h:183
static pastix_int_t cblk_colnbr(const SolverCblk *cblk)
Compute the number of columns in a column block.
Definition solver.h:329
static void * cblk_getdataU(const SolverCblk *cblk)
Get the pointer to the data associated to the upper part of the cblk.
Definition solver.h:354
pastix_int_t fcblknm
Definition solver.h:144
MPI_Request * reqtab
Definition solver.h:269
pastix_int_t recvcnt
Definition solver.h:217
void * rcoeftab
Definition solver.h:274
pastix_int_t * gcbl2loc
Definition solver.h:234
pastix_int_t fanincnt
Definition solver.h:214
volatile int32_t ctrbcnt
Definition solver.h:163
SolverBlok * fblokptr
Definition solver.h:168
static void * cblk_getdataL(const SolverCblk *cblk)
Get the pointer to the data associated to the lower part of the cblk.
Definition solver.h:342
volatile int32_t reqnum
Definition solver.h:272
pastix_int_t gcblknum
Definition solver.h:174
SolverCblk *restrict cblktab
Definition solver.h:228
pastix_int_t stride
Definition solver.h:169
int8_t cblktype
Definition solver.h:164
pastix_int_t * reqidx
Definition solver.h:270
void * lcoeftab
Definition solver.h:177
SolverCblk
Solver column block structure.
Definition solver.h:161
SolverMatrix
Solver matrix structure.
Definition solver.h:203