YAC 3.14.0
Yet Another Coupler
Loading...
Searching...
No Matches
yac_mpi.c
Go to the documentation of this file.
1// Copyright (c) 2024 The YAC Authors
2//
3// SPDX-License-Identifier: BSD-3-Clause
4
5#ifdef HAVE_CONFIG_H
6#include "config.h"
7#endif
8
9#include <assert.h>
10#include <inttypes.h>
11#include <limits.h>
12#include <stdbool.h>
13#include <stdlib.h>
14#include <stdio.h>
15#include <string.h>
16
17#include <mpi.h>
18#include <yaxt.h>
19#include "yac_mpi_internal.h"
20#include "geometry.h"
21#include "ensure_array_size.h"
22#include "ppm/core.h"
23
// reference count of yac_mpi_init calls (decremented in yac_mpi_cleanup)
26static int init_count = 0;
// reference count of yaxt initialisations performed through yac_yaxt_init
27static int yaxt_init_count = 0;
28
// lazily grown scratch buffer handed out by yac_get_comm_buffers
29static size_t * comm_buffer = NULL;
30static size_t comm_buffer_array_size = 0;
// non-zero while comm_buffer is handed out; guards against double use
31static int comm_buffer_in_use = 0;
32
// yac_mpi_is_initialised: returns non-zero if MPI_Init has already been
// called (whether by YAC or by the user).
// NOTE(review): the function signature line was lost in this rendering.
34
35 int mpi_initialized;
36 MPI_Initialized(&mpi_initialized);
37
38 return mpi_initialized;
39}
40
// yac_yaxt_init: initialises the yaxt library on the given communicator,
// unless yaxt is already initialised (e.g. by the user). Requires MPI to be
// initialised beforehand.
// NOTE(review): the YAC_ASSERT(...) invocation lines preceding the error
// message strings below were lost in this rendering of the file.
41void yac_yaxt_init(MPI_Comm comm) {
42
45 "ERROR(yac_yaxt_init): MPI has not yet been initialised");
46
49 "ERROR(yac_yaxt_init): yaxt was initialised by YAC. \n"
50 "In case there are multiple instances of YAC in parallel, the user has "
51 "to initialise yaxt such that it is available on all processes that "
52 "use YAC.")
53
 // only initialise yaxt if YAC has not already done so and yaxt is not
 // currently active
54 if ((yaxt_init_count == 0) && (!xt_initialized() || xt_finalized())) {
55 xt_initialize(comm);
56 }
57 }
59}
60
61void yac_yaxt_init_f2c(MPI_Fint comm) {
62
63 yac_yaxt_init(MPI_Comm_f2c(comm));
64}
65
// yac_yaxt_cleanup: finalises yaxt if it was initialised by YAC.
// NOTE(review): the guarding condition line and counter update lines were
// lost in this rendering of the file.
66static void yac_yaxt_cleanup() {
67
69 xt_finalize();
71 }
73}
74
// two-level stringification: XSTR expands its macro argument first,
// then STR turns the expansion into a string literal
75#define XSTR(s) STR(s)
76#define STR(s) #s
77
// yac_mpi_init: reference-counted MPI initialisation. Calls MPI_Init only
// on the first invocation and only if the user has not initialised MPI
// already. Also verifies that YAC_MPI_SIZE_T_TYPE matches size_t.
// NOTE(review): the function signature and assertion-macro opening lines
// were lost in this rendering of the file.
79
81 sizeof(size_t) == sizeof(YAC_MPI_SIZE_T_TYPE),
82 "ERROR(yac_mpi_init_core): "
83 "could not determine MPI data type for size_t "
84 "(sizeof(size_t): %zu; sizeof(%s): %zu)",
85 sizeof(size_t), XSTR(YAC_MPI_SIZE_T_TYPE),
86 sizeof(YAC_MPI_SIZE_T_TYPE))
87
88 if ((init_count == 0) && (!yac_mpi_is_initialised())) {
89 MPI_Init(NULL, NULL);
91 }
92
93 init_count++;
94}
95
// yac_mpi_cleanup: decrements the init reference count; on the last
// reference the shared comm_buffer is released (it must not be in use).
// NOTE(review): the function signature and assertion-macro opening lines
// were lost in this rendering of the file.
97
98 if (init_count == 1) {
100 !comm_buffer_in_use, "ERROR(yac_mpi_finalize): comm_buffer still in use")
101 free(comm_buffer);
102 comm_buffer = NULL;
104 }
105 init_count--;
107}
108
// yac_mpi_finalize: performs cleanup and finalises MPI.
// NOTE(review): the function signature and preceding cleanup-call lines
// were lost in this rendering of the file.
110
113 yac_mpi_call(MPI_Finalize(), MPI_COMM_WORLD);
114}
115
116// GCOVR_EXCL_START
117//taken from http://beige.ucs.indiana.edu/I590/node85.html
118void yac_mpi_error(int error_code, MPI_Comm comm) {
119 int rank;
120 MPI_Comm_rank(comm, &rank);
121
122 char error_string[MPI_MAX_ERROR_STRING];
123 int length_of_error_string, error_class;
124
125 MPI_Error_class(error_code, &error_class);
126 MPI_Error_string(error_class, error_string, &length_of_error_string);
127 fprintf(stderr, "%3d: %s\n", rank, error_string);
128 MPI_Abort(comm, error_code);
129}
130// GCOVR_EXCL_STOP
131
// yac_alltoallv_p2p: all-to-all-v exchange with size_t counts and
// displacements (displacements are in elements of size dt_size). With
// USE_P2P_ALLTOALLV (defined below) the exchange is built from
// MPI_Isend/MPI_Irecv pairs, skipping zero counts; otherwise counts and
// displacements are down-converted to int and MPI_Alltoallv is used.
// caller/line identify the original call site for error messages.
// NOTE(review): the function signature and the opening lines of the
// yac_mpi_call(/YAC_ASSERT_F( macro invocations were lost in this
// rendering of the file.
133 void const * send_buffer, size_t const * sendcounts, size_t const * sdispls,
134 void * recv_buffer, size_t const * recvcounts, size_t const * rdispls,
135 size_t dt_size, MPI_Datatype dt, MPI_Comm comm, char const * caller, int line) {
136
137#define USE_P2P_ALLTOALLV
138#ifdef USE_P2P_ALLTOALLV
139 int comm_rank, comm_size;
140 yac_mpi_call(MPI_Comm_rank(comm, &comm_rank), comm);
141 yac_mpi_call(MPI_Comm_size(comm, &comm_size), comm);
142
 // count the number of non-empty messages to be posted
143 int req_count = 0;
144 for (int i = 0; i < comm_size; ++i)
145 req_count += (sendcounts[i] > 0) + (recvcounts[i] > 0);
146 MPI_Request * req = xmalloc((size_t)req_count * sizeof(*req));
147
 // two passes over the ranks: first [comm_rank, comm_size), then
 // [0, comm_rank), which staggers the communication pattern across ranks
148 req_count = 0;
149 for (int j = 0, lb = comm_rank, ub = comm_size; j < 2;
150 ++j, lb = 0, ub = comm_rank) {
151 for (int i = lb; i < ub; ++i) {
152 if (sendcounts[i] > 0) {
154 sendcounts[i] <= INT_MAX,
155 "ERROR(%s(%d)::yac_alltoallv_p2p): "
156 "sendcounts[%d] = %zu exceeds INT_MAX (%d)",
157 caller, line, i, sendcounts[i], (int)INT_MAX)
159 MPI_Isend(
160 (void const *)((unsigned char *)send_buffer +
161 dt_size * sdispls[i]),
162 (int)(sendcounts[i]), dt, i, 0,
163 comm, req + req_count), comm);
164 ++req_count;
165 }
166 if (recvcounts[i] > 0) {
168 recvcounts[i] <= INT_MAX,
169 "ERROR(%s(%d)::yac_alltoallv_p2p): "
170 "recvcounts[%d] = %zu exceeds INT_MAX (%d)",
171 caller, line, i, recvcounts[i], (int)INT_MAX)
173 MPI_Irecv(
174 (void *)((unsigned char *)recv_buffer +
175 dt_size * rdispls[i]),
176 (int)(recvcounts[i]), dt, i, 0,
177 comm, req + req_count), comm);
178 ++req_count;
179 }
180 }
181 }
182 yac_mpi_call(MPI_Waitall(req_count, req, MPI_STATUSES_IGNORE), comm);
183 free(req);
184#else // USE_P2P_ALLTOALLV
 // fallback: convert all size_t arguments to int (asserting they fit)
 // and delegate to MPI_Alltoallv
185 int comm_size;
186 yac_mpi_call(MPI_Comm_size(comm, &comm_size), comm);
187 int * int_buffer = xmalloc(4 * comm_size * sizeof(*int_buffer));
188 int * int_sendcounts = int_buffer + 0 * comm_size;
189 int * int_sdispls = int_buffer + 1 * comm_size;
190 int * int_recvcounts = int_buffer + 2 * comm_size;
191 int * int_rdispls = int_buffer + 3 * comm_size;
192 for (int i = 0; i < comm_size; ++i) {
194 sendcounts[i] <= INT_MAX,
195 "ERROR(%s(%d)::yac_alltoallv_p2p): "
196 "sendcounts[%d] = %zu exceeds INT_MAX (%d)",
197 caller, line, i, sendcounts[i], (int)INT_MAX)
199 sdispls[i] <= INT_MAX,
200 "ERROR(%s(%d)::yac_alltoallv_p2p): "
201 "sdispls[%d] = %zu exceeds INT_MAX (%d)",
202 caller, line, i, sdispls[i], (int)INT_MAX)
204 recvcounts[i] <= INT_MAX,
205 "ERROR(%s(%d)::yac_alltoallv_p2p): "
206 "recvcounts[%d] = %zu exceeds INT_MAX (%d)",
207 caller, line, i, recvcounts[i], (int)INT_MAX)
209 rdispls[i] <= INT_MAX,
210 "ERROR(%s(%d)::yac_alltoallv_p2p): "
211 "rdispls[%d] = %zu exceeds INT_MAX (%d)",
212 caller, line, i, rdispls[i], (int)INT_MAX)
213 int_sendcounts[i] = (int)(sendcounts[i]);
214 int_sdispls[i] = (int)(sdispls[i]);
215 int_recvcounts[i] = (int)(recvcounts[i]);
216 int_rdispls[i] = (int)(rdispls[i]);
217 }
219 MPI_Alltoallv(send_buffer, int_sendcounts, int_sdispls, dt,
220 recv_buffer, int_recvcounts, int_rdispls, dt, comm), comm);
221 free(int_buffer);
222#endif // USE_P2P_ALLTOALLV
223}
224
// generates a typed wrapper yac_alltoallv_<NAME>_p2p that forwards to the
// generic yac_alltoallv_p2p with the matching element size and MPI datatype
225#define YAC_ALLTOALL_P2P_TYPE(NAME, TYPE, TYPE_SIZE, MPI_TYPE) \
226 void yac_alltoallv_ ## NAME ## _p2p( \
227 TYPE const * send_buffer, size_t const * sendcounts, size_t const * sdispls, \
228 TYPE * recv_buffer, size_t const * recvcounts, size_t const * rdispls, \
229 MPI_Comm comm, char const * caller, int line) { \
230 yac_alltoallv_p2p( \
231 (void const *)send_buffer, sendcounts, sdispls, \
232 (void *)recv_buffer, recvcounts, rdispls, \
233 TYPE_SIZE, MPI_TYPE, comm, caller, line); \
234 }
235
// typed instantiations (note: "packed" uses an element size of 1 byte)
236YAC_ALLTOALL_P2P_TYPE(int, int, sizeof(int), MPI_INT)
238YAC_ALLTOALL_P2P_TYPE(uint64, uint64_t, sizeof(uint64_t), MPI_UINT64_T)
239YAC_ALLTOALL_P2P_TYPE(packed, void, 1, MPI_PACKED)
240YAC_ALLTOALL_P2P_TYPE(dble, double, sizeof(double), MPI_DOUBLE)
241YAC_ALLTOALL_P2P_TYPE(size_t, size_t, sizeof(size_t), YAC_MPI_SIZE_T)
242
// yac_alltoallv_p2p_group: all-to-all-v exchange restricted to a
// yac_group_comm (a contiguous rank range [start, start+size) within the
// underlying communicator), implemented with MPI_Isend/MPI_Irecv pairs and
// skipping zero counts. Counts/displacements are per group-local rank.
// NOTE(review): the function signature and yac_mpi_call( opening lines
// were lost in this rendering of the file.
244 void const * send_buffer, int const * sendcounts, int const * sdispls,
245 void * recv_buffer, int const * recvcounts, int const * rdispls,
246 size_t dt_size, MPI_Datatype dt, struct yac_group_comm group_comm) {
247
248 MPI_Comm comm = group_comm.comm;
249 int comm_rank;
250 yac_mpi_call(MPI_Comm_rank(comm, &comm_rank), comm);
 // group-local rank of the calling process
251 int rank = comm_rank - group_comm.start;
252
253 int req_count = 0;
254 for (int i = 0; i < group_comm.size; ++i)
255 req_count += (sendcounts[i] > 0) + (recvcounts[i] > 0);
256 MPI_Request * req = xmalloc((size_t)req_count * sizeof(*req));
257
 // two passes: first ranks [rank, size), then [0, rank), staggering the
 // communication pattern across the group
258 req_count = 0;
259 for (int j = 0, lb = rank, ub = group_comm.size; j < 2;
260 ++j, lb = 0, ub = rank) {
261 for (int i = lb; i < ub; ++i) {
262 if (sendcounts[i] > 0) {
263
265 MPI_Isend(
266 (void const *)((unsigned char *)send_buffer +
267 dt_size * (size_t)(sdispls[i])),
268 sendcounts[i], dt, i + group_comm.start, 0,
269 comm, req + req_count), comm);
270 ++req_count;
271 }
272 if (recvcounts[i] > 0) {
274 MPI_Irecv(
275 (void *)((unsigned char *)recv_buffer +
276 dt_size * (size_t)(rdispls[i])),
277 recvcounts[i], dt, i + group_comm.start, 0,
278 comm, req + req_count), comm);
279 ++req_count;
280 }
281 }
282 }
283 yac_mpi_call(MPI_Waitall(req_count, req, MPI_STATUSES_IGNORE), comm);
284 free(req);
285}
286
// Returns the largest power of two that is strictly smaller than x
// (and 0 for x <= 1).
static int nearest_power_of_two(int x) {

  int pof2 = 1;
  while (pof2 < x) pof2 <<= 1;
  return pof2 >> 1;
}
294
295// based on https://doi.org/10.1016/j.parco.2017.08.004
// yac_allreduce_sum_dble: in-place allreduce (elementwise sum of doubles)
// within a yac_group_comm using a recursive-doubling exchange over the
// largest power-of-two subset; the remaining ranks are folded in
// pairwise at the start and receive the result back at the end.
// NOTE(review): the function signature and yac_mpi_call( opening lines
// were lost in this rendering of the file.
297 double * buffer, int count, struct yac_group_comm group_comm) {
298
299 int comm_rank;
300 yac_mpi_call(MPI_Comm_rank(group_comm.comm, &comm_rank), group_comm.comm);
301
302 int rank = comm_rank - group_comm.start;
303 int pof2 = nearest_power_of_two(group_comm.size);
 // rem ranks do not fit into the power-of-two exchange
304 int rem = group_comm.size - pof2;
305 int my_rank;
306 double * recv_buffer = xmalloc((size_t)count * sizeof(*recv_buffer));
307
 // pre-step: the first 2*rem ranks reduce pairwise; odd ranks absorb the
 // contribution of their even neighbour and stay in the exchange
308 if (rank < 2 * rem) {
309
310 if (rank & 1) {
312 MPI_Recv(
313 (void*)recv_buffer, count, MPI_DOUBLE, rank - 1 + group_comm.start, 0,
314 group_comm.comm, MPI_STATUS_IGNORE), group_comm.comm);
315 for (int i = 0; i < count; ++i) buffer[i] += recv_buffer[i];
316 my_rank = rank / 2;
317 } else {
319 MPI_Send(
320 (void const *)buffer, count, MPI_DOUBLE, rank + 1 + group_comm.start,
321 0, group_comm.comm), group_comm.comm);
 // even ranks drop out of the recursive-doubling phase
322 my_rank = -1;
323 }
324 } else {
325 my_rank = rank - rem;
326 }
 // recursive doubling over the pof2 participating ranks
327 if (my_rank != -1) {
328 int mask = 1;
329 while (mask < pof2) {
330 int newdst = my_rank ^ mask;
331 int dst;
 // map the virtual rank back to a real group-local rank
332 if (newdst < rem) dst = newdst * 2 + 1;
333 else dst = newdst + rem;
335 MPI_Sendrecv(
336 (void const*)buffer, count, MPI_DOUBLE, dst + group_comm.start, 0,
337 (void*)recv_buffer, count, MPI_DOUBLE, dst + group_comm.start, 0,
338 group_comm.comm, MPI_STATUS_IGNORE),
339 group_comm.comm);
340 for (int i = 0; i < count; ++i) buffer[i] += recv_buffer[i];
341 mask <<= 1;
342 }
343 }
344 free(recv_buffer);
 // post-step: odd ranks return the final result to their even neighbour
345 if (rank < 2 * rem) {
346 if (rank & 1) {
348 MPI_Send(
349 (void const*)buffer, count, MPI_DOUBLE, rank - 1 + group_comm.start,
350 0, group_comm.comm), group_comm.comm);
351 } else {
353 MPI_Recv(
354 (void*)buffer, count, MPI_DOUBLE, rank + 1 + group_comm.start, 0,
355 group_comm.comm, MPI_STATUS_IGNORE), group_comm.comm);
356 }
357 }
358}
359
// Integer floor(log2(x)); returns 0 for any x <= 1.
static int log2_(int x) {

  int result = 0;
  for (; x > 1; x >>= 1) ++result;
  return result;
}
366
367// based on https://doi.org/10.1109/71.642949
// yac_allgather_size_t: allgather of count size_t values per rank within a
// yac_group_comm, using the Bruck-style dissemination scheme: in each of
// log2(size) rounds the locally accumulated block is sent to a rank nblk
// positions below and the same amount received from nblk positions above,
// doubling the block length; a final exchange transfers the remainder, and
// the rotated result is copied into rank order in recvbuf.
// NOTE(review): the function signature and yac_mpi_call( opening lines
// were lost in this rendering of the file.
369 const size_t * sendbuf, size_t * recvbuf, int count,
370 struct yac_group_comm group_comm) {
371
372 int comm_rank;
373 yac_mpi_call(MPI_Comm_rank(group_comm.comm, &comm_rank), group_comm.comm);
374 int rank = comm_rank - group_comm.size;
375
376 size_t * temp = xmalloc((size_t)group_comm.size * (size_t)count * sizeof(*temp));
377
378 int lg2 = log2_(group_comm.size);
379 memcpy(temp, sendbuf, (size_t)count * sizeof(*temp));
380 int nblk = 1;
381 int curr_len = count;
382
383 for (int r = 0; r < lg2; ++r) {
384 int dst = (rank - nblk + group_comm.size) % group_comm.size;
385 int src = (rank + nblk) % group_comm.size;
387 MPI_Sendrecv(
388 (void const*)temp, curr_len, YAC_MPI_SIZE_T, dst + group_comm.start, 0,
389 (void *)(temp + (size_t)curr_len), curr_len, YAC_MPI_SIZE_T,
390 src + group_comm.start, 0, group_comm.comm, MPI_STATUS_IGNORE),
391 group_comm.comm);
392 nblk *= 2;
393 curr_len *= 2;
394 }
 // final partial round for non-power-of-two group sizes
395 int rest = count * group_comm.size - curr_len;
396 int dst = (rank - nblk + group_comm.size) % group_comm.size;
397 int src = (rank + nblk) % group_comm.size;
399 MPI_Sendrecv(
400 (void const*)temp, rest, YAC_MPI_SIZE_T, dst + group_comm.start, 0,
401 (void*)(temp + (size_t)curr_len), rest, YAC_MPI_SIZE_T,
402 src + group_comm.start, 0, group_comm.comm, MPI_STATUS_IGNORE),
403 group_comm.comm);
 // undo the rank-relative rotation of the gathered data
404 memcpy(recvbuf + (size_t)count * (size_t)rank,
405 temp, (size_t)count * (size_t)(group_comm.size - rank) * sizeof(*temp));
406 memcpy(recvbuf, temp + (size_t)count * (size_t)(group_comm.size - rank),
407 (size_t)count * (size_t)rank * sizeof(*temp));
408
409 free(temp);
410}
411
// yac_bcast_group: binomial-tree broadcast within a yac_group_comm. If
// root lies outside the group, it first ships the data to the group's
// first rank, which then acts as root 0 of the tree.
// NOTE(review): the function signature and yac_mpi_call( opening lines
// were lost in this rendering of the file.
413 void * buffer, int count, MPI_Datatype datatype, int root,
414 struct yac_group_comm group_comm) {
415
416 int comm_rank;
417 yac_mpi_call(MPI_Comm_rank(group_comm.comm, &comm_rank), group_comm.comm);
418 int rank = comm_rank - group_comm.start;
419
420 // if root is not part of the group
421 if ((root < group_comm.start) ||
422 (root >= group_comm.start + group_comm.size)) {
423
 // an external root sends to the first group rank and is done
424 if (comm_rank == root) {
426 MPI_Send(
427 (void const*)buffer, count, datatype, group_comm.start, 0,
428 group_comm.comm), group_comm.comm);
429 return;
430 } else if (comm_rank == group_comm.start) {
432 MPI_Recv(
433 buffer, count, datatype, root, 0, group_comm.comm,
434 MPI_STATUS_IGNORE), group_comm.comm);
435 }
436 root = 0;
437 } else {
 // make root group-local
438 root -= group_comm.start;
439 }
440
441 // if not root, receive data
442 if (rank != root) {
443
444 int temp_rank = (group_comm.size + rank - root) % group_comm.size;
445 int bit = 1;
446
 // highest set bit of temp_rank determines the parent in the tree
447 while (bit <= temp_rank) bit <<= 1;
448 bit >>= 1;
449
450 int src_rank =
451 (((temp_rank ^ bit) + root) % group_comm.size) + group_comm.start;
452
454 MPI_Recv(buffer, count, datatype, src_rank, 0, group_comm.comm,
455 MPI_STATUS_IGNORE), group_comm.comm);
456 }
457
458 // relative rank in respect to root
459 int temp_rank = (group_comm.size + rank - root) % group_comm.size;
460 int bit = 1, send_rank;
461
462 while(bit <= temp_rank) bit <<= 1;
463
 // forward the data to all children in the binomial tree
464 while ((send_rank = temp_rank | bit) < group_comm.size) {
465
466 bit <<= 1;
467
468 send_rank = ((send_rank + root) % group_comm.size) + group_comm.start;
469
471 MPI_Send(
472 (void const*)buffer, count, datatype, send_rank, 0, group_comm.comm),
473 group_comm.comm);
474 }
475}
476
// yac_group_comm_new: creates a group communicator spanning all ranks of
// comm (start 0, size = comm size) on a private duplicate of comm.
// NOTE(review): the function signature line was lost in this rendering.
478
479 struct yac_group_comm group_comm;
480 group_comm.start = 0;
481 yac_mpi_call(MPI_Comm_size(comm, &(group_comm.size)), comm);
482 yac_mpi_call(MPI_Comm_dup(comm, &(group_comm.comm)), comm);
483
484 return group_comm;
485}
486
487void yac_group_comm_delete(struct yac_group_comm group_comm) {
488
489 yac_mpi_call(MPI_Comm_free(&(group_comm.comm)), group_comm.comm);
490}
491
// yac_group_comm_get_rank: group-local rank of the calling process.
// NOTE(review): the getter signature lines were lost in this rendering.
493 return yac_group_comm_get_global_rank(group_comm) - group_comm.start;
494}
495
// yac_group_comm_get_size: number of ranks in the group.
497 return group_comm.size;
498}
499
// yac_group_comm_get_global_rank: rank in the underlying communicator.
501 int comm_rank;
502 yac_mpi_call(MPI_Comm_rank(group_comm.comm, &comm_rank), group_comm.comm);
503 return comm_rank;
504}
505
// yac_group_comm_get_global_size: size of the underlying communicator.
507 int comm_size;
508 yac_mpi_call(MPI_Comm_size(group_comm.comm, &comm_size), group_comm.comm);
509 return comm_size;
510}
511
// yac_group_comm_split: splits a group communicator at split_rank into two
// contiguous sub-groups sharing the same underlying communicator;
// local_group_comm receives the sub-group containing the calling process,
// remote_group_comm the other one.
// NOTE(review): the function signature and the YAC_ASSERT( opening line
// were lost in this rendering of the file.
513 struct yac_group_comm group_comm, int split_rank,
514 struct yac_group_comm * local_group_comm,
515 struct yac_group_comm * remote_group_comm) {
516
517 int comm_rank;
518 MPI_Comm comm = group_comm.comm;
519 yac_mpi_call(MPI_Comm_rank(comm, &comm_rank), comm);
520
522 (split_rank >= 0) && (split_rank < group_comm.size),
523 "ERROR(yac_group_comm_split): invalid split rank")
524
 // index 0: ranks below split_rank; index 1: ranks from split_rank on
525 int start[2] = {group_comm.start, group_comm.start + split_rank};
526 int size[2] = {split_rank, group_comm.size - split_rank};
527 int local_idx = (comm_rank - group_comm.start) >= split_rank;
528
529 local_group_comm->start = start[local_idx];
530 local_group_comm->size = size[local_idx];
531 local_group_comm->comm = comm;
532 remote_group_comm->start = start[local_idx^1];
533 remote_group_comm->size = size[local_idx^1];
534 remote_group_comm->comm = comm;
535}
536
// yac_get_bounding_circle_mpi_datatype: builds a committed MPI struct
// datatype matching struct bounding_circle (base_vector[3], inc_angle.sin,
// inc_angle.cos — all doubles), resized to sizeof(struct bounding_circle).
// NOTE(review): the function signature and the yac_mpi_call( opening line
// were lost in this rendering of the file.
538
539 struct bounding_circle dummy;
540 MPI_Datatype bnd_circle_dt;
541 int array_of_blocklengths[] = {3, 1, 1};
 // member offsets computed from a dummy instance
542 const MPI_Aint array_of_displacements[] =
543 {(MPI_Aint)(intptr_t)(const void *)&(dummy.base_vector[0]) -
544 (MPI_Aint)(intptr_t)(const void *)&dummy,
545 (MPI_Aint)(intptr_t)(const void *)&(dummy.inc_angle.sin) -
546 (MPI_Aint)(intptr_t)(const void *)&dummy,
547 (MPI_Aint)(intptr_t)(const void *)&(dummy.inc_angle.cos) -
548 (MPI_Aint)(intptr_t)(const void *)&dummy};
549 const MPI_Datatype array_of_types[] =
550 {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
552 MPI_Type_create_struct(3, array_of_blocklengths, array_of_displacements,
553 array_of_types, &bnd_circle_dt), comm);
554 return yac_create_resized(bnd_circle_dt, sizeof(dummy), comm);
555}
556
// yac_create_resized: resizes dt to an extent of new_size bytes, frees the
// input datatype, and returns the committed resized datatype. The
// OPENMPI_WORKAROUND branch preserves the original lower bound instead of
// forcing it to 0.
// NOTE(review): the function signature and yac_mpi_call( opening lines
// were lost in this rendering of the file.
558 MPI_Datatype dt, size_t new_size, MPI_Comm comm) {
559
560 MPI_Datatype resized_dt;
561
562#define OPENMPI_WORKAROUND
563#ifdef OPENMPI_WORKAROUND
564 MPI_Aint lb, extent;
565 MPI_Type_get_extent(dt, &lb, &extent);
567 MPI_Type_create_resized(dt, lb, (MPI_Aint)new_size, &resized_dt), comm);
568#else
570 MPI_Type_create_resized(dt, 0, (MPI_Aint)new_size, &resized_dt), comm);
571#endif
572#undef OPENMPI_WORKAROUND
573 yac_mpi_call(MPI_Type_free(&dt), comm);
574 yac_mpi_call(MPI_Type_commit(&resized_dt), comm);
575 return resized_dt;
576}
577
// yac_generate_alltoallv_args: exchanges sendcounts into recvcounts via
// MPI_Alltoall and derives exclusive prefix-sum displacements for both
// directions. sdispls must have room for count*comm_size+1 entries.
// NOTE(review): the function signature and the yac_mpi_call( opening line
// were lost in this rendering of the file.
579 int count, size_t const * sendcounts, size_t * recvcounts,
580 size_t * sdispls, size_t * rdispls, MPI_Comm comm) {
581
582 int comm_size;
583 yac_mpi_call(MPI_Comm_size(comm, &comm_size), comm);
584
585 // exchange the number of requested points
587 MPI_Alltoall(
588 (void const*)sendcounts, count, YAC_MPI_SIZE_T,
589 (void*)recvcounts, count, YAC_MPI_SIZE_T, comm), comm);
590
591 // sdispls are offset by one position, this is intentional because this is
592 // useful for packing of data
593 sdispls[0] = 0;
594 size_t iter_count = (size_t)(count * comm_size);
595 for (size_t i = 0, saccu = 0, raccu = 0; i < iter_count; ++i) {
596 sdispls[i+1] = saccu;
597 rdispls[i] = raccu;
598 saccu += sendcounts[i];
599 raccu += recvcounts[i];
600 }
601}
602
// yac_get_comm_buffers: hands out four count*comm_size-sized size_t arrays
// (sendcounts/recvcounts/rdispls and a one-element-larger sdispls) carved
// out of the shared comm_buffer, or out of a fresh allocation if the shared
// buffer is already in use. Only the sendcounts section is zeroed.
// NOTE(review): the function signature, the ENSURE_ARRAY_SIZE( invocation
// line, and the comm_buffer_in_use update line were lost in this rendering
// of the file.
604 int count, size_t ** sendcounts, size_t ** recvcounts,
605 size_t ** sdispls, size_t ** rdispls, MPI_Comm comm) {
606
607 int comm_size;
608 yac_mpi_call(MPI_Comm_size(comm, &comm_size), comm);
609
610 size_t * comm_buffer_;
611 if (!comm_buffer_in_use) {
614 4 * (size_t)count * (size_t)comm_size + 1);
615 comm_buffer_ = comm_buffer;
617 } else {
618 comm_buffer_ =
619 xmalloc(
620 (4 * (size_t)count * (size_t)comm_size + 1) * sizeof(*comm_buffer_));
621 }
622
623 size_t offset = (size_t)count * (size_t)comm_size;
624 *sendcounts = comm_buffer_ + 0 * offset;
625 *recvcounts = comm_buffer_ + 1 * offset;
626 *rdispls = comm_buffer_ + 2 * offset;
627 *sdispls = comm_buffer_ + 3 * offset; // sdispls is bigger by one element,
628 // which is useful when packing data
629 // for alltoallv operation
630 memset(
631 comm_buffer_, 0, (size_t)count * (size_t)comm_size * sizeof(*comm_buffer_));
632}
633
// yac_free_comm_buffers: returns buffers obtained from
// yac_get_comm_buffers. Since all four arrays live in one allocation
// anchored at sendcounts, only sendcounts has to be inspected: a private
// allocation is freed, the shared comm_buffer is merely marked free.
// NOTE(review): the function signature line was lost in this rendering.
635 size_t * sendcounts, size_t * recvcounts,
636 size_t * sdispls, size_t * rdispls) {
637
638 UNUSED(recvcounts);
639 UNUSED(sdispls);
640 UNUSED(rdispls);
641
642 if (sendcounts != comm_buffer) free(sendcounts);
643 else comm_buffer_in_use = 0;
644}
645
// yac_string_get_pack_size: number of bytes needed to pack the string with
// yac_string_pack (an int length header plus the characters, without the
// terminating '\0'). NULL is permitted only when allow_null is set.
// NOTE(review): the function signature and the YAC_ASSERT_F( opening
// lines were lost in this rendering of the file.
647 char const * caller, char const * string, MPI_Comm comm, int allow_null) {
648
650 (string != NULL) || allow_null,
651 "ERROR(%s::yac_string_get_pack_size): "
652 "NULL string not allowed when allow_null is false", caller);
653
654 size_t len = (string == NULL)?0:strlen(string);
656 len <= INT_MAX,
657 "ERROR(%s::yac_string_get_pack_size): string too long", caller);
658
659 int strlen_pack_size, string_pack_size;
660 yac_mpi_call(MPI_Pack_size(1, MPI_INT, comm, &strlen_pack_size), comm);
662 MPI_Pack_size((int)len, MPI_CHAR, comm, &string_pack_size), comm);
663
664 return (size_t)strlen_pack_size + (size_t)string_pack_size;
665}
666
668 char const * caller, char const * string, void * buffer, int buffer_size,
669 int * position, MPI_Comm comm, int allow_null) {
670
672 (string != NULL) || allow_null,
673 "ERROR(%s::yac_string_pack): "
674 "NULL string not allowed when allow_null is false", caller)
675
676 int len_int;
677
678 if (string == NULL) {
679 len_int = allow_null ? -1 : 0;
680 } else {
681 size_t len = strlen(string);
683 len <= INT_MAX, "ERROR(%s::yac_string_pack): string too long", caller)
684 len_int = (int)len;
685 }
686
688 MPI_Pack(&len_int, 1, MPI_INT, buffer, buffer_size, position, comm), comm);
689
690 if (len_int > 0) {
692 MPI_Pack(string, len_int, MPI_CHAR, buffer, buffer_size, position, comm),
693 comm);
694 }
695}
696
// yac_string_unpack: inverse of yac_string_pack. Reads the length header;
// a value of -1 yields NULL, otherwise a freshly allocated, NUL-terminated
// copy of the packed characters is returned (caller owns and frees it).
// NOTE(review): the function signature and yac_mpi_call( opening lines
// were lost in this rendering of the file.
698 void const * buffer, int buffer_size, int * position, MPI_Comm comm) {
699
700 int string_len;
702 MPI_Unpack(buffer, buffer_size, position, &string_len, 1, MPI_INT, comm),
703 comm);
704
705 char * string = NULL;
706
707 if (string_len > -1) {
708
709 string = xmalloc((size_t)string_len + 1);
710 if (string_len > 0) {
712 MPI_Unpack(
713 buffer, buffer_size, position, string, string_len, MPI_CHAR, comm),
714 comm);
715 }
716 string[string_len] = '\0';
717 }
718
719 return string;
720}
721
722/*
723 * Local Variables:
724 * c-basic-offset: 2
725 * coding: utf-8
726 * indent-tabs-mode: nil
727 * show-trailing-whitespace: t
728 * require-trailing-newline: t
729 * End:
730 */
#define YAC_ASSERT(exp, msg)
#define UNUSED(x)
Definition core.h:72
#define ENSURE_ARRAY_SIZE(arrayp, curr_array_size, req_size)
Definition __init__.py:1
#define xmalloc(size)
Definition ppm_xfuncs.h:66
struct sin_cos_angle inc_angle
angle between the middle point and the boundary of the spherical cap
Definition geometry.h:53
double base_vector[3]
Definition geometry.h:51
double sin
Definition geometry.h:33
double cos
Definition geometry.h:33
static int mask[16]
double * buffer
double * send_buffer
double * recv_buffer
#define YAC_ASSERT_F(exp, format,...)
Definition yac_assert.h:30
#define XSTR(s)
Definition yac_mpi.c:75
static int mpi_initialised_by_yac
Definition yac_mpi.c:24
#define YAC_ALLTOALL_P2P_TYPE(NAME, TYPE, TYPE_SIZE, MPI_TYPE)
Definition yac_mpi.c:225
void yac_mpi_finalize()
Definition yac_mpi.c:109
void yac_mpi_cleanup()
Definition yac_mpi.c:96
void yac_alltoallv_p2p_group(void const *send_buffer, int const *sendcounts, int const *sdispls, void *recv_buffer, int const *recvcounts, int const *rdispls, size_t dt_size, MPI_Datatype dt, struct yac_group_comm group_comm)
Definition yac_mpi.c:243
int yac_group_comm_get_global_rank(struct yac_group_comm group_comm)
Definition yac_mpi.c:500
void yac_generate_alltoallv_args(int count, size_t const *sendcounts, size_t *recvcounts, size_t *sdispls, size_t *rdispls, MPI_Comm comm)
Definition yac_mpi.c:578
static int yaxt_initialised_by_yac
Definition yac_mpi.c:25
void yac_free_comm_buffers(size_t *sendcounts, size_t *recvcounts, size_t *sdispls, size_t *rdispls)
Definition yac_mpi.c:634
int yac_group_comm_get_rank(struct yac_group_comm group_comm)
Definition yac_mpi.c:492
int yac_mpi_is_initialised()
Definition yac_mpi.c:33
static int init_count
Definition yac_mpi.c:26
void yac_group_comm_split(struct yac_group_comm group_comm, int split_rank, struct yac_group_comm *local_group_comm, struct yac_group_comm *remote_group_comm)
Definition yac_mpi.c:512
MPI_Datatype yac_get_bounding_circle_mpi_datatype(MPI_Comm comm)
Definition yac_mpi.c:537
static size_t * comm_buffer
Definition yac_mpi.c:29
int yac_group_comm_get_global_size(struct yac_group_comm group_comm)
Definition yac_mpi.c:506
void yac_yaxt_init(MPI_Comm comm)
Definition yac_mpi.c:41
struct yac_group_comm yac_group_comm_new(MPI_Comm comm)
Definition yac_mpi.c:477
void yac_mpi_error(int error_code, MPI_Comm comm)
Definition yac_mpi.c:118
static size_t comm_buffer_array_size
Definition yac_mpi.c:30
static int yaxt_init_count
Definition yac_mpi.c:27
void yac_allreduce_sum_dble(double *buffer, int count, struct yac_group_comm group_comm)
Definition yac_mpi.c:296
void yac_yaxt_init_f2c(MPI_Fint comm)
Definition yac_mpi.c:61
void yac_get_comm_buffers(int count, size_t **sendcounts, size_t **recvcounts, size_t **sdispls, size_t **rdispls, MPI_Comm comm)
Definition yac_mpi.c:603
static int log2_(int x)
Definition yac_mpi.c:360
int yac_group_comm_get_size(struct yac_group_comm group_comm)
Definition yac_mpi.c:496
size_t yac_string_get_pack_size(char const *caller, char const *string, MPI_Comm comm, int allow_null)
Compute number of bytes required to pack a string for MPI transport.
Definition yac_mpi.c:646
MPI_Datatype yac_create_resized(MPI_Datatype dt, size_t new_size, MPI_Comm comm)
Definition yac_mpi.c:557
void yac_bcast_group(void *buffer, int count, MPI_Datatype datatype, int root, struct yac_group_comm group_comm)
Definition yac_mpi.c:412
char * yac_string_unpack(void const *buffer, int buffer_size, int *position, MPI_Comm comm)
Unpack a C string from a buffer packed with yac_string_pack.
Definition yac_mpi.c:697
void yac_allgather_size_t(const size_t *sendbuf, size_t *recvbuf, int count, struct yac_group_comm group_comm)
Definition yac_mpi.c:368
static int nearest_power_of_two(int x)
Definition yac_mpi.c:287
static int comm_buffer_in_use
Definition yac_mpi.c:31
void yac_string_pack(char const *caller, char const *string, void *buffer, int buffer_size, int *position, MPI_Comm comm, int allow_null)
Pack a C string into a provided buffer using MPI_Pack semantics.
Definition yac_mpi.c:667
static void yac_yaxt_cleanup()
Definition yac_mpi.c:66
void yac_group_comm_delete(struct yac_group_comm group_comm)
Definition yac_mpi.c:487
void yac_mpi_init()
Definition yac_mpi.c:78
void yac_alltoallv_p2p(void const *send_buffer, size_t const *sendcounts, size_t const *sdispls, void *recv_buffer, size_t const *recvcounts, size_t const *rdispls, size_t dt_size, MPI_Datatype dt, MPI_Comm comm, char const *caller, int line)
Definition yac_mpi.c:132
#define yac_mpi_call(call, comm)
#define YAC_MPI_SIZE_T_TYPE
#define YAC_MPI_SIZE_T
YAC_INT yac_int
Definition yac_types.h:15
#define yac_int_dt
Definition yac_types.h:18