YAC 3.12.0
Yet Another Coupler
Loading...
Searching...
No Matches
read_exodus_grid.c
Go to the documentation of this file.
1// Copyright (c) 2025 The YAC Authors
2//
3// SPDX-License-Identifier: BSD-3-Clause
4
5#ifdef HAVE_CONFIG_H
6#include "config.h"
7#endif
8
9#include <stdlib.h>
10#include <stdio.h>
11#include <string.h>
12#include <assert.h>
13#include <math.h>
14
15#include "read_exodus_grid.h"
16#include "utils_common.h"
17#include "io_utils.h"
18#include "geometry.h"
19#include "read_grid.h"
20#include "yac_mpi_internal.h"
21
22#ifdef YAC_NETCDF_ENABLED
23#include <netcdf.h>
24
// Builds an exclusive prefix sum over "counts": offsets[i] is the sum of
// counts[0..i-1] (offsets[0] == 0).  The returned array of N entries is
// xmalloc'ed; ownership passes to the caller.
static size_t * generate_offsets(size_t N, int * counts) {

  size_t * offsets = xmalloc(N * sizeof(*offsets));
  size_t running_sum = 0;
  for (size_t idx = 0; idx < N; ++idx) {
    offsets[idx] = running_sum;
    running_sum += (size_t)(counts[idx]);
  }
  return offsets;
}
34
// taken from scales-ppm library
// https://www.dkrz.de/redmine/projects/scales-ppm
//
// Maps a global element index to the partition that owns it under the even
// block distribution used throughout this file (inverse of the
// "start = num_elements * part / num_partitions" slicing formula).
static inline int
partition_idx_from_element_idx(size_t element_idx, size_t num_elements,
                               int num_partitions) {

  unsigned long parts = (unsigned long)num_partitions;
  unsigned long numerator =
    (unsigned long)element_idx * parts + (parts - 1UL);
  return (int)(numerator / (unsigned long)num_elements);
}
45
// Reads basic exodus grid data from a NetCDF file in parallel and
// redistributes it into an even block decomposition across all ranks of
// "comm".  The designated I/O ranks each read a contiguous slice of the
// node coordinates and of the element-to-node connectivity; MPI all-to-all
// rounds then move the data to its target ranks.  All output arrays are
// allocated here (xmalloc) and ownership passes to the caller.
// NOTE(review): several source lines were lost in extraction (the function
// signature line and the YAC_ASSERT/YAC_HANDLE_ERROR/
// partition_idx_from_element_idx call prefixes, e.g. before "97", "128",
// "209", "244", and "314") -- verify against the original file before
// editing.
47 const char * filename, MPI_Comm comm, yac_coordinate_pointer * node_coords,
48 yac_int ** elem_ids, yac_int ** node_ids,
49 size_t * num_elem, size_t * num_nodes,
50 int ** num_nodes_per_elem, int ** num_elem_per_node,
51 size_t ** elem_to_node, size_t ** node_to_elem) {
52
53 int comm_rank, comm_size;
54
55 MPI_Comm_rank(comm, &comm_rank);
56 MPI_Comm_size(comm, &comm_size);
57
58 int local_is_io, * io_ranks, num_io_ranks;
59 yac_get_io_ranks(comm, &local_is_io, &io_ranks, &num_io_ranks);
60
61 size_t num_global_nodes, num_global_elem, num_nod_per_elem;
62
63 size_t read_local_start_elem = 0;
64 size_t read_num_local_elems = 0;
65 size_t read_local_start_node = 0;
66 size_t read_num_local_nodes = 0;
67
68 yac_coordinate_pointer read_node_coords = NULL;
69 int * read_dist_elem_to_node = NULL;
70
71 if (local_is_io) {
72
// determine this rank's position within the list of I/O ranks
73 unsigned long io_proc_idx = ULONG_MAX;
74 for (int i = 0; (i < num_io_ranks) && (io_proc_idx == ULONG_MAX); ++i)
75 if (io_ranks[i] == comm_rank)
76 io_proc_idx = (unsigned long)i;
77
78 // open file
79 int ncid;
80 yac_nc_open(filename, NC_NOWRITE, &ncid);
81
82 // get number of cells and vertices
83 int dim_id;
84 size_t num_el_blk, num_el_in_blk1;
85 yac_nc_inq_dimid(ncid, "num_nodes", &dim_id);
86 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &num_global_nodes));
87 yac_nc_inq_dimid(ncid, "num_elem", &dim_id);
88 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &num_global_elem));
89 yac_nc_inq_dimid(ncid, "num_el_blk", &dim_id);
90 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &num_el_blk));
91 yac_nc_inq_dimid(ncid, "num_el_in_blk1", &dim_id);
92 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &num_el_in_blk1));
93 yac_nc_inq_dimid(ncid, "num_nod_per_el1", &dim_id);
94 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &num_nod_per_elem));
95
97 num_el_blk == 1,
98 "ERROR(yac_read_exodus_grid_information_parallel): "
99 "reader currently only supports a single block of elements");
101 num_global_elem == num_el_in_blk1,
102 "ERROR(yac_read_exodus_grid_information_parallel): "
103 "total number of elements and number of elements in first block "
104 "of elements do not match");
105
106 // determine local range for element and node data
107 read_local_start_elem =
108 ((unsigned long)num_global_elem * io_proc_idx) / (unsigned long)num_io_ranks;
109 read_num_local_elems =
110 ((unsigned long)num_global_elem * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
111 (unsigned long)read_local_start_elem;
112 read_local_start_node =
113 ((unsigned long)num_global_nodes * io_proc_idx) / (unsigned long)num_io_ranks;
114 read_num_local_nodes =
115 ((unsigned long)num_global_nodes * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
116 (unsigned long)read_local_start_node;
117
118 // read basic grid data (each process its individual part)
119 double * read_node_coord_x =
120 xmalloc(read_num_local_nodes * sizeof(*read_node_coord_x));
121 double * read_node_coord_y =
122 xmalloc(read_num_local_nodes * sizeof(*read_node_coord_y));
123 double * read_node_coord_z =
124 xmalloc(read_num_local_nodes * sizeof(*read_node_coord_z));;
125 int varid;
// the "coord" variable holds x/y/z as three rows of one 2d variable;
// each component is read with a separate hyperslab access
126 yac_nc_inq_varid(ncid, "coord", &varid);
128 nc_get_vara_double(
129 ncid, varid, (size_t[]){0, read_local_start_node},
130 (size_t[]){1,read_num_local_nodes}, read_node_coord_x));
132 nc_get_vara_double(
133 ncid, varid, (size_t[]){1, read_local_start_node},
134 (size_t[]){1,read_num_local_nodes}, read_node_coord_y));
136 nc_get_vara_double(
137 ncid, varid, (size_t[]){2, read_local_start_node},
138 (size_t[]){1,read_num_local_nodes}, read_node_coord_z));
139 read_node_coords =
140 xmalloc(read_num_local_nodes * sizeof(*read_node_coords));
141 for (size_t i = 0; i < read_num_local_nodes; ++i) {
142 read_node_coords[i][0] = read_node_coord_x[i];
143 read_node_coords[i][1] = read_node_coord_y[i];
144 read_node_coords[i][2] = read_node_coord_z[i];
145 }
146 free(read_node_coord_z);
147 free(read_node_coord_y);
148 free(read_node_coord_x);
149
150 read_dist_elem_to_node =
151 xmalloc(
152 read_num_local_elems * num_nod_per_elem *
153 sizeof(*read_dist_elem_to_node));
154 yac_nc_inq_varid(ncid, "connect1", &varid);
156 nc_get_vara_int(
157 ncid, varid, (size_t[]){read_local_start_elem, 0},
158 (size_t[]){read_num_local_elems, num_nod_per_elem},
159 read_dist_elem_to_node));
// exodus connectivity is one-based; convert to zero-based indices
160 for (size_t i = 0; i < read_num_local_elems * num_nod_per_elem; ++i)
161 read_dist_elem_to_node[i]--;
162
163 YAC_HANDLE_ERROR(nc_close(ncid));
164
165 } else {
// dummy allocations so the frees/Alltoallv below see valid pointers
166 read_node_coords = xmalloc(1 * sizeof(*read_node_coords));
167 read_dist_elem_to_node = xmalloc(1 * sizeof(*read_dist_elem_to_node));
168 }
169
170 free(io_ranks);
171
// broadcast the grid sizes read from file to all ranks
// (assumes rank 0 is an I/O rank that has read them -- TODO confirm)
172 {
173 size_t tmp;
174 if (comm_rank == 0) tmp = num_global_nodes;
175 MPI_Bcast(&tmp, 1, YAC_MPI_SIZE_T, 0, comm);
176 num_global_nodes = tmp;
177 if (comm_rank == 0) tmp = num_global_elem;
178 MPI_Bcast(&tmp, 1, YAC_MPI_SIZE_T, 0, comm);
179 num_global_elem = tmp;
180 if (comm_rank == 0) tmp = num_nod_per_elem;
181 MPI_Bcast(&tmp, 1, YAC_MPI_SIZE_T, 0, comm);
182 num_nod_per_elem = tmp;
183 }
184
185 // determine local range for element and node data
186 size_t local_start_elem =
187 ((unsigned long)num_global_elem * (unsigned long)comm_rank) /
188 (unsigned long)comm_size;
189 size_t num_local_elems =
190 ((unsigned long)num_global_elem * ((unsigned long)comm_rank+1)) /
191 (unsigned long)comm_size - (unsigned long)local_start_elem;
192 size_t local_start_node =
193 ((unsigned long)num_global_nodes * (unsigned long)comm_rank) /
194 (unsigned long)comm_size;
195 size_t num_local_nodes =
196 ((unsigned long)num_global_nodes * ((unsigned long)comm_rank+1)) /
197 (unsigned long)comm_size - (unsigned long)local_start_node;
198
199 // redistribute basic element data (from io decomposition)
200 int * dist_elem_to_node =
201 xmalloc(num_local_elems * num_nod_per_elem * sizeof(*dist_elem_to_node));
202 {
203 int * send_count = xcalloc(comm_size, sizeof(*send_count));
204 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
205
// count, per target rank, how many connectivity entries it will receive
206 for (size_t i = 0; i < read_num_local_elems; ++i)
207 send_count[
209 read_local_start_elem + i, num_global_elem, comm_size)] +=
210 num_nod_per_elem;
211
212 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
213
214 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
215 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
216 int send_accum = 0, recv_accum = 0;
217 for (int i = 0; i < comm_size; ++i) {
218 send_displ[i] = send_accum;
219 recv_displ[i] = recv_accum;
220 send_accum += send_count[i];
221 recv_accum += recv_count[i];
222 }
223
224 MPI_Alltoallv(read_dist_elem_to_node, send_count, send_displ, MPI_INT,
225 dist_elem_to_node, recv_count, recv_displ, MPI_INT, comm);
226
227 free(recv_displ);
228 free(send_displ);
229 free(recv_count);
230 free(send_count);
231 free(read_dist_elem_to_node);
232 }
233
234 // redistribute basic node data (from io decomposition)
235 yac_coordinate_pointer dist_node_coords =
236 xmalloc(num_local_nodes * sizeof(*dist_node_coords));
237 {
238 int * send_count = xcalloc(comm_size, sizeof(*send_count));
239 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
240
// three doubles (x/y/z) per node are exchanged
241 for (size_t i = 0; i < read_num_local_nodes; ++i)
242 send_count[
244 read_local_start_node + i, num_global_nodes, comm_size)] += 3;
245
246 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
247
248 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
249 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
250 int send_accum = 0, recv_accum = 0;
251 for (int i = 0; i < comm_size; ++i) {
252 send_displ[i] = send_accum;
253 recv_displ[i] = recv_accum;
254 send_accum += send_count[i];
255 recv_accum += recv_count[i];
256 }
257
258 MPI_Alltoallv(read_node_coords, send_count, send_displ, MPI_DOUBLE,
259 dist_node_coords, recv_count, recv_displ, MPI_DOUBLE, comm);
260
261 free(recv_displ);
262 free(send_displ);
263 free(recv_count);
264 free(send_count);
265 free(read_node_coords);
266 }
267
268 // determine required nodes for core elements
269 // in addition compute elem_to_node, node_to_elem, and num_elem_per_node
270 size_t num_core_nodes;
271 {
272 size_t N = num_local_elems * num_nod_per_elem;
273 *node_ids = xmalloc(N * sizeof(**node_ids));
274 *num_elem_per_node = xmalloc(N * sizeof(**num_elem_per_node));
275 *elem_to_node = xmalloc(N * sizeof(**elem_to_node));
276 *node_to_elem = xmalloc(N * sizeof(**node_to_elem));
277 for (size_t i = 0; i < N; ++i)
278 (*node_ids)[i] = (yac_int)dist_elem_to_node[i];
// *node_to_elem doubles as the permutation array of the sort; after the
// compaction loop below each entry holds the local element index that the
// sorted connectivity entry came from (entry index / nodes-per-element)
279 size_t * permutation = *node_to_elem;
280 for (size_t i = 0; i < N; ++i) permutation[i] = i;
281 yac_quicksort_index_yac_int_size_t(*node_ids, N, permutation);
282 // remove duplicated core nodes and count number of elements per node
283 yac_int prev_node_id = YAC_INT_MAX;
284 num_core_nodes = 0;
285 for (size_t i = 0; i < N; ++i) {
286 yac_int curr_node_id = (*node_ids)[i];
287 if (prev_node_id == curr_node_id) {
288 (*num_elem_per_node)[num_core_nodes-1]++;
289 } else {
290 (*num_elem_per_node)[num_core_nodes] = 1;
291 (*node_ids)[num_core_nodes] = (prev_node_id = curr_node_id);
292 ++num_core_nodes;
293 }
294 (*elem_to_node)[permutation[i]] = num_core_nodes-1;
295 permutation[i] /= num_nod_per_elem;
296 }
297 *node_ids =
298 xrealloc(*node_ids, num_core_nodes * sizeof(**node_ids));
299 *num_elem_per_node =
300 xrealloc(*num_elem_per_node,
301 num_core_nodes * sizeof(**num_elem_per_node));
302 free(dist_elem_to_node);
303 }
304
305 // get node coordinate data
306 {
307 *node_coords = xmalloc(num_core_nodes * sizeof(**node_coords));
308 int * send_count = xcalloc(comm_size, sizeof(*send_count));
309 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
310
// request each required node id from the rank owning it in the even
// block decomposition
311 for (size_t i = 0; i < num_core_nodes; ++i)
312 send_count[
314 (*node_ids)[i], num_global_nodes, comm_size)]++;
315
316 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
317
318 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
319 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
320 int send_accum = 0, recv_accum = 0;
321 for (int i = 0; i < comm_size; ++i) {
322 send_displ[i] = send_accum;
323 recv_displ[i] = recv_accum;
324 send_accum += send_count[i];
325 recv_accum += recv_count[i];
326 }
327
328 int num_all_local_nodes_remote = 0;
329 for (int i = 0; i < comm_size; ++i)
330 num_all_local_nodes_remote += recv_count[i];
331
332 yac_int * remote_node_buffer =
333 xmalloc(num_all_local_nodes_remote * sizeof(*remote_node_buffer));
334
335 MPI_Alltoallv(
336 *node_ids, send_count, send_displ, yac_int_dt,
337 remote_node_buffer, recv_count, recv_displ, yac_int_dt, comm);
338
339 yac_coordinate_pointer send_node_coords =
340 xmalloc(num_all_local_nodes_remote * sizeof(*send_node_coords));
341
// answer the received node-id requests with the coordinates from the
// local block decomposition; scale counts/displacements by 3 for the
// x/y/z double components
342 for (int i = 0, l = 0; i < comm_size; ++i) {
343 for (int j = 0; j < recv_count[i]; ++j, ++l) {
344 size_t idx = (size_t)(remote_node_buffer[l]) - local_start_node;
345 send_node_coords[l][0] = dist_node_coords[idx][0];
346 send_node_coords[l][1] = dist_node_coords[idx][1];
347 send_node_coords[l][2] = dist_node_coords[idx][2];
348 }
349 send_count[i] *= 3;
350 recv_count[i] *= 3;
351 send_displ[i] *= 3;
352 recv_displ[i] *= 3;
353 }
354
355 free(remote_node_buffer);
356 free(dist_node_coords);
357
// reply communication: send/recv roles are swapped relative to the
// request exchange above, hence the reversed count/displ arguments
358 MPI_Alltoallv(send_node_coords, recv_count, recv_displ, MPI_DOUBLE,
359 *node_coords, send_count, send_displ, MPI_DOUBLE, comm);
360
361 free(send_node_coords);
362 free(recv_displ);
363 free(send_displ);
364 free(recv_count);
365 free(send_count);
366 }
367
368 // generate elem ids for local partition
369 *elem_ids = xmalloc(num_local_elems * sizeof(**elem_ids));
370 for (size_t i = 0; i < num_local_elems; ++i)
371 (*elem_ids)[i] = (yac_int)(local_start_elem + i);
372
373 // generate num_nodes_per_elem
374 *num_nodes_per_elem =
375 xmalloc(num_local_elems * sizeof(**num_nodes_per_elem));
376 for (size_t i = 0; i < num_local_elems; ++i)
377 (*num_nodes_per_elem)[i] = num_nod_per_elem;
378
379 *num_elem = num_local_elems;
380 *num_nodes = num_core_nodes;
381}
382
// Temporary per-element-edge record used while deduplicating edges:
// "node" holds the two endpoint node indices, ordered so that the smaller
// index comes first (see compare_temp_edges).
// NOTE(review): the extraction appears to have dropped a member here (an
// "elem_to_edge_idx" field is used below and listed in the symbol index) --
// verify against the original file.
383struct temp_edge {
384  size_t node[2];
386};
387
388static int compare_temp_edges(void const * a, void const * b) {
389
390 struct temp_edge const * edge_a = (struct temp_edge const *)a;
391 struct temp_edge const * edge_b = (struct temp_edge const *)b;
392
393 if (edge_a->node[0] != edge_b->node[0])
394 return (edge_a->node[0] > edge_b->node[0])?1:-1;
395 return (edge_a->node[1] > edge_b->node[1]) -
396 (edge_a->node[1] < edge_b->node[1]);
397}
398
// Returns non-zero if the given point (3d coordinates on the unit sphere)
// lies numerically on a pole, i.e. |z| is within 1e-8 of 1.
static int check_pole(double * vertex) {

  double abs_z = fabs(vertex[2]);
  return fabs(abs_z - 1.0) < 1e-8;
}
403
// Returns non-zero if the edge between the two given points (3d coordinates
// on the unit sphere) runs along a circle of longitude, i.e. both points
// have numerically the same azimuth.
static int check_lon_edge(double * vertex_a_, double * vertex_b_) {

  // project both vertices onto the equatorial plane (drop z) ...
  double proj_a[3] = {vertex_a_[0], vertex_a_[1], 0.0};
  double proj_b[3] = {vertex_b_[0], vertex_b_[1], 0.0};

  // ... renormalise and compare the resulting directions
  normalise_vector(proj_a);
  normalise_vector(proj_b);

  return get_vector_angle(proj_a, proj_b) < 1e-6;
}
414
415static int check_lat_edge(double * vertex_a, double * vertex_b) {
416
417 return fabs(acos(vertex_a[2]) - acos(vertex_b[2])) < 1e-6;
418}
419
// Builds a yac_basic_grid_data structure from an exodus grid file read in
// parallel (see yac_read_exodus_grid_information_parallel).  Edges are
// derived from the element-to-node connectivity by sorting and
// deduplicating all element edges.  With "use_ll_edges" every edge must lie
// on a circle of longitude or latitude, otherwise all edges become great
// circle edges.
// NOTE(review): extraction lost some source lines here (the function
// signature, the declaration of "edge_type" before "442", the "grid_data"
// declaration before "520", and YAC_ASSERT_F call prefixes) -- verify
// against the original file before editing.
421 const char * filename, int use_ll_edges, MPI_Comm comm) {
422
423 yac_coordinate_pointer node_coords;
424 yac_int * elem_ids;
425 yac_int * node_ids;
426 size_t num_elem;
427 size_t num_nodes;
428 int * num_nodes_per_elem;
429 int * num_elem_per_node;
430 size_t * elem_to_node;
431 size_t * node_to_elem;
432
434 filename, comm, &node_coords, &elem_ids, &node_ids, &num_elem, &num_nodes,
435 &num_nodes_per_elem, &num_elem_per_node, &elem_to_node, &node_to_elem);
436
437 size_t num_edges;
438 size_t * elem_to_edge;
439 yac_size_t_2_pointer edge_to_node;
441
442 { // compute edge data
443
444 // compute the maximum number of edges
445 size_t max_num_edges = 0;
446 for (size_t i = 0; i < num_elem; ++i)
447 max_num_edges += num_nodes_per_elem[i];
448
449 // generate temporary array containing edge information
// each element edge is stored with its endpoints sorted ascending
// ("order" selects which endpoint goes first) so duplicates collate
450 struct temp_edge * temp_edges =
451 xmalloc(max_num_edges * sizeof(*temp_edges));
452 for (size_t i = 0, offset = 0, k = 0; i < num_elem; ++i) {
453 size_t * curr_elem_to_node = elem_to_node + offset;
454 size_t curr_num_edges = num_nodes_per_elem[i];
455 offset += curr_num_edges;
456 for (size_t j = 0; j < curr_num_edges; ++j, ++k) {
457 int order =
458 curr_elem_to_node[j] > curr_elem_to_node[(j+1)%curr_num_edges];
459 temp_edges[k].node[order] = curr_elem_to_node[j];
460 temp_edges[k].node[order^1] = curr_elem_to_node[(j+1)%curr_num_edges];
461 temp_edges[k].elem_to_edge_idx = k;
462 }
463 }
464 qsort(temp_edges, max_num_edges,
465 sizeof(*temp_edges), compare_temp_edges);
466
467 // generate elem_to_edge and edge_to_node; count total number of edges
468 elem_to_edge = xmalloc(max_num_edges * sizeof(*elem_to_edge));
469 num_edges = 0;
// in-place compaction: temp_edges is reused as edge_to_node, which is
// safe because the write position never overtakes the read position
470 edge_to_node = (yac_size_t_2_pointer)temp_edges;
471 for (size_t i = 0, prev_indices[2] = {SIZE_MAX, SIZE_MAX};
472 i < max_num_edges; ++i) {
473
474 size_t curr_elem_to_edge_idx = temp_edges[i].elem_to_edge_idx;
475 if ((prev_indices[0] != temp_edges[i].node[0]) ||
476 (prev_indices[1] != temp_edges[i].node[1])) {
477
478 prev_indices[0] = temp_edges[i].node[0];
479 prev_indices[1] = temp_edges[i].node[1];
480 edge_to_node[num_edges][0] = prev_indices[0];
481 edge_to_node[num_edges][1] = prev_indices[1];
482 ++num_edges;
483 }
484
485 elem_to_edge[curr_elem_to_edge_idx] = num_edges - 1;
486 }
487 edge_to_node =
488 xrealloc(edge_to_node, num_edges * sizeof(*edge_to_node));
489
// classify each edge; with use_ll_edges every edge has to be either a
// longitude or a latitude circle edge (pole-touching edges count as lon
// if only one endpoint is a pole, lat if both are)
490 edge_type = xmalloc(num_edges * sizeof(*edge_type));
491 if (use_ll_edges) {
492 for (size_t i = 0; i < num_edges; ++i) {
493 double * edge_vertex_a = node_coords[edge_to_node[i][0]];
494 double * edge_vertex_b = node_coords[edge_to_node[i][1]];
495 int vertex_a_is_pole = check_pole(edge_vertex_a);
496 int vertex_b_is_pole = check_pole(edge_vertex_b);
497 int is_lon_edge =
498 (vertex_a_is_pole ^ vertex_b_is_pole) ||
499 (!vertex_a_is_pole && !vertex_b_is_pole &&
500 check_lon_edge(edge_vertex_a, edge_vertex_b));
501 int is_lat_edge =
502 (vertex_a_is_pole && vertex_b_is_pole) ||
503 check_lat_edge(edge_vertex_a, edge_vertex_b);
505 is_lon_edge || is_lat_edge,
506 "ERROR(yac_read_exodus_basic_grid_data_parallel): "
507 "\"use_ll_edges == true\" but edge is neither lon nor lat "
508 "((%lf,%lf,%lf),(%lf,%lf,%lf))",
509 edge_vertex_a[0], edge_vertex_a[1], edge_vertex_a[2],
510 edge_vertex_b[0], edge_vertex_b[1], edge_vertex_b[2]);
511 edge_type[i] = (is_lon_edge)?YAC_LON_CIRCLE_EDGE:YAC_LAT_CIRCLE_EDGE;
512 }
513 } else {
514 for (size_t i = 0; i < num_edges; ++i)
515 edge_type[i] = YAC_GREAT_CIRCLE_EDGE;
516 }
517 }
518
// assemble the output structure; note that cell_to_edge_offsets aliases
// cell_to_vertex_offsets (both arrays have identical content by
// construction) -- free only once
520 grid_data.vertex_coordinates = node_coords;
521 grid_data.cell_ids = elem_ids;
522 grid_data.vertex_ids = node_ids;
523 grid_data.edge_ids = NULL;
524 grid_data.num_cells = num_elem;
525 grid_data.num_vertices = num_nodes;
526 grid_data.num_edges = num_edges;
527 grid_data.core_cell_mask = NULL;
528 grid_data.core_vertex_mask = NULL;
529 grid_data.core_edge_mask = NULL;
530 grid_data.num_vertices_per_cell = num_nodes_per_elem;
531 grid_data.num_cells_per_vertex = num_elem_per_node;
532 grid_data.cell_to_vertex = elem_to_node;
533 grid_data.cell_to_vertex_offsets = generate_offsets(num_elem, num_nodes_per_elem);
534 grid_data.cell_to_edge = elem_to_edge;
535 grid_data.cell_to_edge_offsets = grid_data.cell_to_vertex_offsets;
536 grid_data.vertex_to_cell = node_to_elem;
537 grid_data.vertex_to_cell_offsets = generate_offsets(num_nodes, num_elem_per_node);
538 grid_data.edge_to_vertex = edge_to_node;
539 grid_data.edge_type = edge_type;
540 grid_data.num_total_cells = num_elem;
541 grid_data.num_total_vertices = num_nodes;
542 grid_data.num_total_edges = num_edges;
543
544 return grid_data;
545}
546
// Convenience wrapper: reads the exodus grid data in parallel and wraps it
// into a newly created basic grid named "gridname"; the grid takes
// ownership of the data.
// NOTE(review): the signature line and the call prefix before "553"
// (yac_basic_grid_new, per the symbol index) were lost in extraction --
// verify against the original file.
548 char const * filename, char const * gridname, int use_ll_edges,
549   MPI_Comm comm) {
550
551   return
553       gridname,
554       yac_read_exodus_basic_grid_data_parallel(filename, use_ll_edges, comm));
555}
556
557#else
558
// Stub used when YAC is built without NetCDF support: silences unused
// parameter warnings and aborts with an error message.
// NOTE(review): the signature line was lost in extraction -- verify.
560 const char * filename, MPI_Comm comm, yac_coordinate_pointer * node_coords,
561 yac_int ** elem_ids, yac_int ** node_ids,
562 size_t * num_elem, size_t * num_nodes,
563 int ** num_nodes_per_elem, int ** num_elem_per_node,
564 size_t ** elem_to_node, size_t ** node_to_elem) {
565
566 UNUSED(filename);
567 UNUSED(comm);
568 UNUSED(node_coords);
569 UNUSED(elem_ids);
570 UNUSED(node_ids);
571 UNUSED(num_elem);
572 UNUSED(num_nodes);
573 UNUSED(num_nodes_per_elem);
574 UNUSED(num_elem_per_node);
575 UNUSED(elem_to_node);
576 UNUSED(node_to_elem);
577 die(
578 "ERROR(yac_read_exodus_grid_information_parallel): "
579 "YAC is built without the NetCDF support");
580}
581
// Stub used when YAC is built without NetCDF support: aborts with an error;
// the unreachable return value keeps the compiler happy.
// NOTE(review): the signature line and the call prefix before "594"
// (yac_generate_basic_grid_data_reg_2d, per the symbol index) were lost in
// extraction -- verify against the original file.
583 const char * filename, int use_ll_edges, MPI_Comm comm) {
584
585 UNUSED(filename);
586 UNUSED(use_ll_edges);
587 UNUSED(comm);
588 die(
589 "ERROR(yac_read_exodus_basic_grid_data_parallel): "
590 "YAC is built without the NetCDF support");
591
592 return
594 (size_t[]){0,0}, (int[]){0,0}, NULL, NULL);
595}
596
// Stub used when YAC is built without NetCDF support: aborts with an error;
// the unreachable NULL return keeps the compiler happy.
// NOTE(review): the signature line was lost in extraction -- verify.
598 char const * filename, char const * gridname, int use_ll_edges,
599   MPI_Comm comm) {
600
601   UNUSED(filename);
602   UNUSED(gridname);
603   UNUSED(use_ll_edges);
604   UNUSED(comm);
605   die(
606     "ERROR(yac_read_exodus_basic_grid_parallel): "
607     "YAC is built without the NetCDF support");
608
609   return NULL;
610}
611
612#endif // YAC_NETCDF_ENABLED
#define YAC_ASSERT(exp, msg)
struct yac_basic_grid * yac_basic_grid_new(char const *name, struct yac_basic_grid_data grid_data)
Definition basic_grid.c:50
struct yac_basic_grid_data yac_generate_basic_grid_data_reg_2d(size_t nbr_vertices[2], int cyclic[2], double *lon_vertices, double *lat_vertices)
Definition grid_reg2d.c:65
#define UNUSED(x)
Definition core.h:73
static void normalise_vector(double v[])
Definition geometry.h:635
static double get_vector_angle(double const a[3], double const b[3])
Definition geometry.h:340
yac_edge_type
Definition grid_cell.h:12
@ YAC_GREAT_CIRCLE_EDGE
great circle
Definition grid_cell.h:13
@ YAC_LAT_CIRCLE_EDGE
latitude circle
Definition grid_cell.h:14
@ YAC_LON_CIRCLE_EDGE
longitude circle
Definition grid_cell.h:15
void yac_get_io_ranks(MPI_Comm comm, int *local_is_io_, int **io_ranks_, int *num_io_ranks_)
Definition io_utils.c:309
void yac_nc_inq_varid(int ncid, char const *name, int *varidp)
Definition io_utils.c:411
void yac_nc_open(const char *path, int omode, int *ncidp)
Definition io_utils.c:350
void yac_nc_inq_dimid(int ncid, char const *name, int *dimidp)
Definition io_utils.c:385
#define xrealloc(ptr, size)
Definition ppm_xfuncs.h:67
#define xcalloc(nmemb, size)
Definition ppm_xfuncs.h:64
#define xmalloc(size)
Definition ppm_xfuncs.h:66
static int partition_idx_from_element_idx(size_t element_idx, size_t num_elements, int num_partitions)
void yac_read_exodus_grid_information_parallel(const char *filename, MPI_Comm comm, yac_coordinate_pointer *node_coords, yac_int **elem_ids, yac_int **node_ids, size_t *num_elem, size_t *num_nodes, int **num_nodes_per_elem, int **num_elem_per_node, size_t **elem_to_node, size_t **node_to_elem)
static int check_lon_edge(double *vertex_a_, double *vertex_b_)
static size_t * generate_offsets(size_t N, int *counts)
static int check_lat_edge(double *vertex_a, double *vertex_b)
static int check_pole(double *vertex)
struct yac_basic_grid * yac_read_exodus_basic_grid_parallel(char const *filename, char const *gridname, int use_ll_edges, MPI_Comm comm)
static int compare_temp_edges(void const *a, void const *b)
struct yac_basic_grid_data yac_read_exodus_basic_grid_data_parallel(const char *filename, int use_ll_edges, MPI_Comm comm)
size_t vertex[2]
size_t node[2]
size_t elem_to_edge_idx
enum yac_edge_type * edge_type
#define N
#define YAC_HANDLE_ERROR(exp)
Definition toy_output.c:13
void yac_quicksort_index_yac_int_size_t(yac_int *a, size_t n, size_t *idx)
#define YAC_ASSERT_F(exp, format,...)
Definition yac_assert.h:19
#define die(msg)
Definition yac_assert.h:12
#define YAC_MPI_SIZE_T
YAC_INT yac_int
Definition yac_types.h:15
size_t(* yac_size_t_2_pointer)[2]
Definition yac_types.h:23
#define yac_int_dt
Definition yac_types.h:16
double(* yac_coordinate_pointer)[3]
Definition yac_types.h:19