/* Forward declaration: checks that the I/O configuration chosen by the
 * library for communicator `comm` matches the caller-supplied reference
 * data (whether this rank is an I/O rank, the list of I/O ranks, and its
 * length).
 * NOTE(review): stray numbers fused into the text below ("20", "21") look
 * like an extraction artifact of the original file's line numbers; left
 * untouched here. */
20static void utest_check_io_config(
21 MPI_Comm comm,
int ref_local_is_io,
int * ref_io_ranks,
int ref_num_io_ranks);
/* Forward declaration: builds reference I/O-rank data for a node-wise
 * configuration — the first `num_io_ranks_per_node` ranks on each shared-
 * memory node become I/O ranks. Outputs: `*ref_io_ranks` (allocated rank
 * list; caller presumably frees — TODO confirm ownership), `*ref_num_io_ranks`,
 * and `*ref_local_is_io` (flag for the calling rank). */
22static void utest_generate_node_wise_ref_data(
23 MPI_Comm comm,
int num_io_ranks_per_node,
24 int ** ref_io_ranks,
int * ref_num_io_ranks,
int * ref_local_is_io);
/* Interior of the test driver (enclosing function header is not visible in
 * this chunk — presumably main(); TODO confirm). The chunk is a sparse
 * extraction: original line numbers jump, and several call heads (e.g. the
 * MPI_Comm_split(...) line before argument lists) are missing. Code is kept
 * byte-identical; gaps are flagged with NOTE(review). Each scenario sets
 * YAC_IO_* environment variables, builds reference data, and calls
 * utest_check_io_config() to compare against the library's choice. */
30 int comm_rank, comm_size;
31 MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
32 MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
/* The test is hard-wired to exactly 12 processes. */
34 if (comm_size != 12) {
35 PUT_ERR(
"ERROR: wrong number of processes");
/* --- Test 1: explicit rank list on MPI_COMM_WORLD. ----------------------
 * Ranks 1,3,5,7,9,11 are I/O ranks, so every odd rank (comm_rank & 1)
 * expects local_is_io set. */
45 setenv(
"YAC_IO_RANK_LIST",
"1,3,5,7,9,11", 1);
46 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"12", 1);
48 int ref_local_is_io = comm_rank & 1;
49 int ref_io_ranks[] = {1,3,5,7,9,11};
50 int ref_num_io_ranks = (int)(
sizeof(ref_io_ranks) /
sizeof(ref_io_ranks[0]));
52 utest_check_io_config(
53 MPI_COMM_WORLD, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
/* --- Test 2: rank list on a communicator with reversed rank order. ------
 * Key (comm_size - comm_rank) reverses the ranks, so list entries 0,1,4,7
 * in the reversed communicator correspond to different world ranks; the
 * reference below reflects that mapping. */
61 setenv(
"YAC_IO_RANK_LIST",
"0,1,4,7", 1);
62 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"12", 1);
64 MPI_Comm reverse_world_comm;
/* NOTE(review): the call head (presumably "MPI_Comm_split(") on the
 * original line 65 is missing from this extraction. */
66 MPI_COMM_WORLD, 1, comm_size - comm_rank, &reverse_world_comm);
68 int ref_local_is_io = (comm_rank == 0) || (comm_rank == 1) ||
69 (comm_rank == 4) || (comm_rank == 7);
70 int ref_io_ranks[] = {4,7,10,11};
71 int ref_num_io_ranks = (int)(
sizeof(ref_io_ranks) /
sizeof(ref_io_ranks[0]));
73 utest_check_io_config(
74 reverse_world_comm, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
76 MPI_Comm_free(&reverse_world_comm);
/* --- Test 3: rank list on a sub-communicator (world ranks 4..7). --------
 * World ranks 4 and 7 (sub-communicator ranks 0 and 3) are expected to be
 * the I/O ranks. */
84 setenv(
"YAC_IO_RANK_LIST",
"1,4,7,10", 1);
85 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"12", 1);
87 int is_in_subgroup = (comm_rank >= 4) && (comm_rank <= 7);
88 MPI_Comm sub_world_comm;
/* NOTE(review): split call head missing (original line 89). */
90 MPI_COMM_WORLD, is_in_subgroup, comm_rank, &sub_world_comm);
92 int ref_local_is_io = (comm_rank == 4) || (comm_rank == 7);
93 int ref_io_ranks[] = {0,3};
94 int ref_num_io_ranks = (int)(
sizeof(ref_io_ranks) /
sizeof(ref_io_ranks[0]));
97 utest_check_io_config(
98 sub_world_comm, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
100 MPI_Comm_free(&sub_world_comm);
/* --- Test 4: rank list capped by YAC_IO_MAX_NUM_RANKS=3. ----------------
 * Only the first three list entries (1,3,5) survive the cap. */
108 setenv(
"YAC_IO_RANK_LIST",
"1,3,5,7,9,11", 1);
109 setenv(
"YAC_IO_MAX_NUM_RANKS",
"3", 1);
110 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"12", 1);
/* NOTE(review): the continuation of this condition (original line 113,
 * presumably "(comm_rank == 5);") is missing from this extraction —
 * the statement below is truncated mid-expression. */
112 int ref_local_is_io = (comm_rank == 1) || (comm_rank == 3) ||
114 int ref_io_ranks[] = {1,3,5};
115 int ref_num_io_ranks = (int)(
sizeof(ref_io_ranks) /
sizeof(ref_io_ranks[0]));
117 utest_check_io_config(
118 MPI_COMM_WORLD, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
/* --- Test 5: full rank list minus an exclude list. ----------------------
 * All 12 ranks listed, even ranks excluded -> odd ranks are I/O ranks. */
126 setenv(
"YAC_IO_RANK_LIST",
"0,1,2,3,4,5,6,7,8,9,10,11", 1);
127 setenv(
"YAC_IO_RANK_EXCLUDE_LIST",
"0,2,4,6,8,10", 1);
128 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"12", 1);
130 int ref_local_is_io = comm_rank & 1;
131 int ref_io_ranks[] = {1,3,5,7,9,11};
132 int ref_num_io_ranks = (int)(
sizeof(ref_io_ranks) /
sizeof(ref_io_ranks[0]));
134 utest_check_io_config(
135 MPI_COMM_WORLD, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
/* --- Test 6: rank list + exclude list on a sub-communicator. ------------
 * Within world ranks 4..7, odd world ranks (sub ranks 1 and 3) remain. */
143 setenv(
"YAC_IO_RANK_LIST",
"0,1,2,3,4,5,6,7,8,9,10,11", 1);
144 setenv(
"YAC_IO_RANK_EXCLUDE_LIST",
"0,2,4,6,8,10", 1);
145 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"12", 1);
147 int is_in_subgroup = (comm_rank >= 4) && (comm_rank <= 7);
148 MPI_Comm sub_world_comm;
/* NOTE(review): split call head missing (original line 149). */
150 MPI_COMM_WORLD, is_in_subgroup, comm_rank, &sub_world_comm);
152 int ref_local_is_io = (comm_rank & 1) && is_in_subgroup;
153 int ref_io_ranks[] = {1,3};
154 int ref_num_io_ranks = (int)(
sizeof(ref_io_ranks) /
sizeof(ref_io_ranks[0]));
157 utest_check_io_config(
158 sub_world_comm, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
160 MPI_Comm_free(&sub_world_comm);
/* --- Test 7: node-wise configuration, one I/O rank per node. ------------
 * Reference data is computed at runtime from the actual node layout. */
168 setenv(
"YAC_IO_MAX_NUM_RANKS_PER_NODE",
"1", 1);
/* NOTE(review): declarations of ref_io_ranks (int *) and ref_local_is_io
 * (original lines 171/173) are missing from this extraction; they are
 * passed by address below. */
172 int ref_num_io_ranks;
174 utest_generate_node_wise_ref_data(
175 MPI_COMM_WORLD, 1, &ref_io_ranks, &ref_num_io_ranks, &ref_local_is_io);
177 utest_check_io_config(
178 MPI_COMM_WORLD, ref_local_is_io, ref_io_ranks, ref_num_io_ranks);
/* Compares the I/O configuration produced for `comm` (presumably via
 * yac_get_io_ranks(), whose call is not visible in this extraction — see
 * the stray prototype at the end of the chunk) against the reference data.
 * Reports mismatches through PUT_ERR.
 * NOTE(review): this definition is incomplete in this view — the
 * declarations of comm_size, local_is_io, io_ranks and num_io_ranks, the
 * error branches of the two `if`s, the marking of flag[], the final
 * presence-check error, free(flag)/free(io_ranks), and the closing brace
 * are all on missing lines. Code kept byte-identical. */
188static void utest_check_io_config(
189 MPI_Comm comm,
int ref_local_is_io,
int * ref_io_ranks,
int ref_num_io_ranks) {
197 MPI_Comm_size(comm, &comm_size);
/* One zero-initialised flag per rank of comm; used to detect duplicate
 * entries and to check that every reference rank was reported. */
199 int *
flag = calloc((
size_t)comm_size,
sizeof(*
flag));
/* Local result vs. reference: is-I/O flag and rank count. */
201 if (ref_local_is_io != local_is_io)
203 if (ref_num_io_ranks != num_io_ranks)
/* Pass 1 over the reported io_ranks: any rank seen twice is an error
 * (flag[] is presumably set inside this loop on a missing line). */
206 for (
int i = 0;
i < num_io_ranks; ++
i) {
207 if (
flag[io_ranks[i]])
208 PUT_ERR(
"duplicated entry in io_ranks");
/* Pass 2 over the reference list: every reference I/O rank must have been
 * flagged in pass 1 (the PUT_ERR on the missing next line reports it). */
212 for (
int i = 0;
i < ref_num_io_ranks; ++
i)
213 if (!
flag[ref_io_ranks[i]])
/* Builds reference data for the node-wise test: splits `comm` into
 * per-node shared-memory communicators (MPI_COMM_TYPE_SHARED), marks the
 * first `num_io_ranks_per_node` ranks of each node as I/O ranks, gathers
 * the per-rank flags, and compresses them into a global I/O-rank list.
 * NOTE(review): definition incomplete in this view — the
 * MPI_Comm_split_type(...) head, the split_comm_rank declaration/query,
 * the MPI_Allgather(...) head, the loop's `if (is_io_rank[i])` condition
 * (original line 243), the `*ref_io_ranks =` assignment before xrealloc,
 * MPI_Comm_free(&split_comm), and the closing brace are on missing lines. */
220static void utest_generate_node_wise_ref_data(
221 MPI_Comm comm,
int num_io_ranks_per_node,
222 int ** ref_io_ranks,
int * ref_num_io_ranks,
int * ref_local_is_io) {
224 int comm_rank, comm_size;
225 MPI_Comm_rank(comm, &comm_rank);
226 MPI_Comm_size(comm, &comm_size);
/* Split by shared-memory node; comm_rank as key preserves rank order
 * within each node (call head missing, presumably MPI_Comm_split_type). */
230 comm, MPI_COMM_TYPE_SHARED, comm_rank, MPI_INFO_NULL, &
split_comm);
/* The first num_io_ranks_per_node ranks on this node are I/O ranks. */
235 *ref_local_is_io = split_comm_rank < num_io_ranks_per_node;
/* Gather every rank's is-I/O flag (Allgather head missing above L168). */
237 int * is_io_rank =
xmalloc((
size_t)comm_size *
sizeof(*is_io_rank));
239 ref_local_is_io, 1, MPI_INT, is_io_rank, 1, MPI_INT, comm);
/* Compact the flag array in place into a list of I/O rank indices
 * (the guarding `if (is_io_rank[i])` is on a missing line — without it
 * this would copy every rank; TODO confirm against original). */
241 int num_io_ranks = 0;
242 for (
int i = 0;
i < comm_size; ++
i)
244 is_io_rank[num_io_ranks++] =
i;
/* Shrink to the final size; the `*ref_io_ranks =` assignment receiving
 * the xrealloc result is on a missing line. */
247 xrealloc(is_io_rank, (
size_t)num_io_ranks *
sizeof(**ref_io_ranks));
248 *ref_num_io_ranks = num_io_ranks;
/* NOTE(review): trailing extraction residue — three unrelated fragments,
 * kept byte-identical:
 *  - a prototype for yac_get_io_ranks (missing its trailing ';'), which is
 *    presumably the library call exercised by utest_check_io_config;
 *  - an empty-bodied xrealloc macro stub (the real definition is likely
 *    elsewhere in the project);
 *  - a file-scope split_comm used by utest_generate_node_wise_ref_data. */
void yac_get_io_ranks(MPI_Comm comm, int *local_is_io_, int **io_ranks_, int *num_io_ranks_)
#define xrealloc(ptr, size)
static MPI_Comm split_comm