PaStiX Handbook 6.3.2
binding_for_multimpi.c
/**
 *
 * @file binding_for_multimpi.c
 * @copyright 2013-2023 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria,
 *            Univ. Bordeaux. All rights reserved.
 *
 * @version 6.3.2
 * @author Pierre Ramet
 * @author Mathieu Faverge
 * @date 2023-07-21
 *
 * Small test to see what the bindtab array should look like in case of
 * multiple MPI processes per node.
 * This test can be compiled with:
 *   mpicc binding_for_multimpi.c -o binding_for_multimpi -Wall -lpthread -lhwloc
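 * and run, for example, with:
 *   mpirun -np 4 ./binding_for_multimpi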
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h> /* for int64_t */
#include <mpi.h>
#include <unistd.h>
#include <string.h>
#include <hwloc.h>

/**
 * Extract of the PaStiX internal scheduler for simpler compilation of this small test.
 * DO NOT use these functions in your code, but call directly the ones from PaStiX.
 */
static hwloc_topology_t topology;

#ifndef DOXYGEN_SHOULD_SKIP_THIS
int isched_topo_init(void)
{
    hwloc_topology_init( &topology );
    hwloc_topology_load( topology );
    return 0;
}

int isched_topo_world_size(void)
{
    hwloc_obj_t obj = hwloc_get_obj_by_type( topology, HWLOC_OBJ_MACHINE, 0 );
    return hwloc_get_nbobjs_inside_cpuset_by_type( topology, obj->cpuset, HWLOC_OBJ_CORE );
}

/**
 * End of the extract
 */

#define BUF_MAX 256

int main( int argc, char *argv[] )
{
    MPI_Comm intra_comm;
    int      i, len;
    char     procname[BUF_MAX];
    int      rc, key;
    int64_t  color;
    int      world_size, world_rank;
    int      intra_size, intra_rank;
    int      nbthread, intra_nbthread;

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
    key = world_rank;

    /**
     * Section that needs to be added to the code to generate the bindtab arrays
     */
    {
        /**
         * Get the hostname to generate a hash that will be the color of each
         * node. MPI_Get_processor_name() is not used as it can return
         * different strings for processes of the same physical node.
         */
        rc = gethostname( procname, BUF_MAX-1 );
        procname[BUF_MAX-1] = '\0';
        len = strlen( procname );

        /**
         * Compute a hash of the procname, kept in the positive int range so
         * that it is a valid color for MPI_Comm_split() and does not
         * overflow on long hostnames
         */
        color = 0;
        for (i = 0; i < len; i++) {
            color = ( color * 256 + (unsigned char)procname[i] ) % 0x7fffffff;
        }

        /**
         * Create the intra-node communicator
         */
        MPI_Comm_split( MPI_COMM_WORLD, (int)color, key, &intra_comm );
        MPI_Comm_size( intra_comm, &intra_size );
        MPI_Comm_rank( intra_comm, &intra_rank );
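
        /*
         * Note: all ranks that share a hostname computed the same color, so
         * they now belong to the same intra_comm, and intra_rank numbers the
         * processes within one physical node.
         */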

        /**
         * Initialize the hwloc topology of PaStiX. This can be done before
         * pastixInit(), and must be done that early in this case to get the
         * number of available cores.
         */
        isched_topo_init();
        nbthread = isched_topo_world_size();

        intra_nbthread = nbthread / intra_size;
        /* Make sure it is at least 1 */
        intra_nbthread = (intra_nbthread < 1) ? 1 : intra_nbthread;
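
        /*
         * For example, on 24-core nodes with 2 MPI processes per node
         * (intra_size = 2), each process gets intra_nbthread = 24 / 2 = 12
         * cores.
         */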
    }

    if ( world_rank == 0 ) {
        printf( " Number of MPI processes:     %d\n"
                " Number of nodes:             %d\n"
                " Number of cores per node:    %d\n"
                " Number of cores per process: %d\n",
                world_size, world_size / intra_size,
                nbthread, intra_nbthread );
    }

    /**
     * Print the bindtab arrays per node
     */
    {
        char  corelists[2*BUF_MAX];
        char *c = corelists;

        rc = sprintf( c, "[%2d - %s] :", world_rank, procname );
        c += rc;
        for (i = 0; i < intra_nbthread; i++) {
            rc = sprintf( c, "%2d ", intra_nbthread * intra_rank + i );
            c += rc;
        }
        printf( "%s\n", corelists );
    }
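
    /*
     * A minimal sketch of how the core list printed above would typically be
     * consumed, assuming the PaStiX 6 pastixInitWithAffinity() entry point
     * (check the headers of the version you build against):
     *
     *   int bindtab[intra_nbthread];
     *   for (i = 0; i < intra_nbthread; i++) {
     *       bindtab[i] = intra_nbthread * intra_rank + i;
     *   }
     *   pastixInitWithAffinity( &pastix_data, MPI_COMM_WORLD,
     *                           iparm, dparm, bindtab );
     */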

    MPI_Finalize();

    return EXIT_SUCCESS;
}
#endif /* DOXYGEN_SHOULD_SKIP_THIS */