YetAnotherCoupler 3.5.2
read_icon_grid.c
1// Copyright (c) 2024 The YAC Authors
2//
3// SPDX-License-Identifier: BSD-3-Clause
4
5#ifdef HAVE_CONFIG_H
6#include "config.h"
7#endif
8
9#include <stdlib.h>
10#include <stdio.h>
11#include <string.h>
12#include <assert.h>
13#include <math.h>
14
15#include "read_icon_grid.h"
16#include "utils_common.h"
17#include "io_utils.h"
18#include "geometry.h"
19#include "read_grid.h"
20
21#ifdef YAC_NETCDF_ENABLED
22#include <netcdf.h>
23
24static int * get_icon_cell_mask( int ncid, size_t nbr_cells );
25static void get_icon_connect(
26 int ncid, size_t nbr_cells, int ** vertex_of_cell, size_t * nv);
27
28 void yac_read_part_icon_grid_information(
 29 const char * filename, int * nbr_vertices, int * nbr_cells,
30 int ** num_vertices_per_cell, int ** cell_to_vertex,
31 double ** x_vertices, double ** y_vertices,
32 double ** x_cells, double ** y_cells, int ** global_cell_id,
33 int ** cell_mask, int ** cell_core_mask,
34 int ** global_corner_id, int ** corner_core_mask, int rank, int size) {
35
36 // read the global grid
37 yac_read_icon_grid_information(
38 filename, nbr_vertices, nbr_cells, num_vertices_per_cell, cell_to_vertex,
39 x_vertices, y_vertices, x_cells, y_cells, cell_mask);
40
41 // determine local range
42 int local_start =
43 ((unsigned long)(*nbr_cells) * (unsigned long)rank) /
44 (unsigned long)size;
45 int num_local_cells =
46 ((unsigned long)(*nbr_cells) * ((unsigned long)rank+1)) /
47 (unsigned long)size - (unsigned long)local_start;
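 // block decomposition: rank r owns the cells [N*r/size, N*(r+1)/size);
 // the unsigned long arithmetic keeps the intermediate product from overflowing int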
48
49 // mask for required vertices and cells
50 int * required_vertices = xcalloc(*nbr_vertices, sizeof(*required_vertices));
51 int * required_cells = xcalloc(*nbr_cells, sizeof(*required_cells));
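 // marker values: 0 = not required, 2 = part of the local partition (core),
 // 1 = halo (required only because it touches the local partition)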
52
53 int offset = 0;
54 for (int i = 0; i < local_start; ++i)
55 offset += (*num_vertices_per_cell)[i];
56
57 // mark all local cells and their vertices as required
58 for (int i = local_start; i < local_start + num_local_cells; ++i) {
59
60 required_cells[i] = 2;
61 for (int j = 0; j < (*num_vertices_per_cell)[i]; ++j)
62 required_vertices[(*cell_to_vertex)[offset+j]] = 2;
63
64 offset += (*num_vertices_per_cell)[i];
65 }
66
67 // mark all halo cells as required
68 offset = 0;
69 for (int i = 0; i < *nbr_cells; ++i) {
70
71 if (!required_cells[i]) {
72
73 for (int j = 0; j < (*num_vertices_per_cell)[i]; ++j) {
74
75 if (required_vertices[(*cell_to_vertex)[offset+j]]) {
76
77 required_cells[i] = 1;
78 break;
79 }
80 }
81
82 }
83
84 offset += (*num_vertices_per_cell)[i];
85 }
86
87 // mark all halo vertices as required
88 offset = 0;
89 for (int i = 0; i < *nbr_cells; ++i) {
90
91 if (required_cells[i] == 1) {
92
93 for (int j = 0; j < (*num_vertices_per_cell)[i]; ++j) {
94
95 if (!required_vertices[(*cell_to_vertex)[offset+j]]) {
96
97 required_vertices[(*cell_to_vertex)[offset+j]] = 1;
98 }
99 }
100 }
101
102 offset += (*num_vertices_per_cell)[i];
103 }
104
105 // count the number of cells and vertices
106 int part_num_vertices = 0;
107 int part_num_cells = 0;
108 for (int i = 0; i < *nbr_vertices; ++i)
109 if (required_vertices[i])
110 part_num_vertices++;
111 for (int i = 0; i < *nbr_cells; ++i)
112 if(required_cells[i])
113 part_num_cells++;
114
115 *global_cell_id = xmalloc(part_num_cells * sizeof(**global_cell_id));
116 *cell_core_mask = xmalloc(part_num_cells * sizeof(**cell_core_mask));
117 *global_corner_id = xmalloc(part_num_vertices * sizeof(**global_corner_id));
118 *corner_core_mask = xmalloc(part_num_vertices * sizeof(**corner_core_mask));
119
120 // generate final vertex data
121 part_num_vertices = 0;
122 int * global_to_local_vertex =
123 xmalloc(*nbr_vertices * sizeof(*global_to_local_vertex));
124 for (int i = 0; i < *nbr_vertices; ++i) {
125
126 if (required_vertices[i]) {
127
128 (*global_corner_id)[part_num_vertices] = i;
129 (*corner_core_mask)[part_num_vertices] = required_vertices[i] == 2;
130 (*x_vertices)[part_num_vertices] = (*x_vertices)[i];
131 (*y_vertices)[part_num_vertices] = (*y_vertices)[i];
132 global_to_local_vertex[i] = part_num_vertices;
133 part_num_vertices++;
134 }
135 }
136
137 *x_vertices = xrealloc(*x_vertices, part_num_vertices * sizeof(**x_vertices));
138 *y_vertices = xrealloc(*y_vertices, part_num_vertices * sizeof(**y_vertices));
139 *nbr_vertices = part_num_vertices;
140 free(required_vertices);
141
142 // generate final cell data
143 int num_cell_vertex_dependencies = 0;
144 part_num_cells = 0;
145 offset = 0;
146 for (int i = 0; i < *nbr_cells; ++i) {
147
148 if (required_cells[i]) {
149
150 (*global_cell_id)[part_num_cells] = i;
151 (*cell_core_mask)[part_num_cells] = required_cells[i] == 2;
152 (*x_cells)[part_num_cells] = (*x_cells)[i];
153 (*y_cells)[part_num_cells] = (*y_cells)[i];
154 (*cell_mask)[part_num_cells] = (*cell_mask)[i];
155
156 for (int j = 0; j < (*num_vertices_per_cell)[i]; ++j)
157 (*cell_to_vertex)[num_cell_vertex_dependencies++] =
158 global_to_local_vertex[(*cell_to_vertex)[offset+j]];
159
160 (*num_vertices_per_cell)[part_num_cells] = (*num_vertices_per_cell)[i];
161
162 part_num_cells++;
163 }
164
165 offset += (*num_vertices_per_cell)[i];
166 }
167
168 *x_cells = xrealloc(*x_cells, part_num_cells * sizeof(**x_cells));
169 *y_cells = xrealloc(*y_cells, part_num_cells * sizeof(**y_cells));
170 *cell_mask = xrealloc(*cell_mask, part_num_cells * sizeof(**cell_mask));
171
172 *num_vertices_per_cell = xrealloc(*num_vertices_per_cell, part_num_cells *
173 sizeof(**num_vertices_per_cell));
174 *cell_to_vertex = xrealloc(*cell_to_vertex, num_cell_vertex_dependencies *
175 sizeof(**cell_to_vertex));
176 *nbr_cells = part_num_cells;
177 free(required_cells);
178 free(global_to_local_vertex);
179}
180
181void yac_read_icon_grid_information(const char * filename, int * nbr_vertices,
182 int * nbr_cells, int ** num_vertices_per_cell,
183 int ** cell_to_vertex, double ** x_vertices,
184 double ** y_vertices, double ** x_cells,
185 double ** y_cells, int ** cell_mask) {
186
187 /* Open file */
188 int ncid;
189 yac_nc_open(filename, NC_NOWRITE, &ncid);
190
191 /* Get vertex longitudes and latitudes of cells */
192 size_t nbr_vertices_;
 193 yac_read_coords(
 194 ncid, "vlon", "vlat", x_vertices, y_vertices, &nbr_vertices_);
195 *nbr_vertices = (int)nbr_vertices_;
196
197 /* Get cell center longitudes and latitudes of cells */
198 size_t nbr_cells_;
199 yac_read_coords(ncid, "clon", "clat", x_cells, y_cells, &nbr_cells_);
200 *nbr_cells = (int)nbr_cells_;
201
202 /* Get mask of cells */
203 *cell_mask = get_icon_cell_mask ( ncid, *nbr_cells );
204
205 /* Get relations between vertices and cells */
206 int * vertex_of_cell;
207 size_t nv;
208 get_icon_connect(ncid, *nbr_cells, &vertex_of_cell, &nv);
209
210 /* Close file */
211 YAC_HANDLE_ERROR(nc_close(ncid));
212
213 //-------------------------------------------------------------------------//
214
215 *num_vertices_per_cell = xmalloc(*nbr_cells * sizeof(**num_vertices_per_cell));
216 for (int i = 0; i < *nbr_cells; (*num_vertices_per_cell)[i++] = (int)nv);
217
218 *cell_to_vertex = xmalloc(*nbr_cells * nv * sizeof(**cell_to_vertex));
219
 220 // the file stores the connectivity in Fortran order and with 1-based indices;
 // transpose it and shift to 0-based C indices
221 for (int i = 0; i < *nbr_cells; ++i) {
222
223 for (size_t j = 0; j < nv; ++j)
224 (*cell_to_vertex)[nv * i + j] =
225 vertex_of_cell[i + j * (*nbr_cells)] - 1;
226 }
227
228 free(vertex_of_cell);
229}
230
231// taken from scales-ppm library
232// https://www.dkrz.de/redmine/projects/scales-ppm
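// inverse of the block decomposition used in this file: returns the rank that
// owns global element 'element_idx' when 'num_elements' elements are split into
// 'num_partitions' contiguous blocks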
233static inline int
234partition_idx_from_element_idx(unsigned element_idx, unsigned num_elements,
235 int num_partitions) {
236
237 return (int)((((unsigned long)element_idx) * ((unsigned long)num_partitions) +
238 (unsigned long)num_partitions - 1) /
239 ((unsigned long)num_elements));
240}
241
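// scale coordinates by YAC_RAD (degree-to-radian factor) when
// yac_check_coord_units indicates that the variable is stored in degrees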
242static void convert_to_rad(int ncid, int varid, double * array, size_t count) {
243
244 if (yac_check_coord_units(ncid, varid))
245 for (size_t i = 0; i < count; ++i) array[i] *= YAC_RAD;
246}
247
248 void yac_read_icon_grid_information_parallel(
249 const char * filename, MPI_Comm comm, int * nbr_vertices, int * nbr_cells,
250 int ** num_vertices_per_cell, int ** cell_to_vertex, int ** global_cell_ids,
251 int ** cell_owner, int ** global_vertex_ids, int ** vertex_owner,
252 double ** x_vertices, double ** y_vertices,
253 double ** x_cells, double ** y_cells, int ** cell_msk) {
254
255 int comm_rank, comm_size;
256
257 MPI_Comm_rank(comm, &comm_rank);
258 MPI_Comm_size(comm, &comm_size);
259
260 int local_is_io, * io_ranks, num_io_ranks;
261 yac_get_io_ranks(comm, &local_is_io, &io_ranks, &num_io_ranks);
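 // only the I/O ranks read the file; the data is afterwards redistributed to
 // all processes of comm with MPI_Alltoallv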
262
263 size_t ncells, nvertices, nv, ne;
264
265 size_t read_local_start_cell = 0;
266 size_t read_num_local_cells = 0;
267 size_t read_local_start_vertex = 0;
268 size_t read_num_local_vertices = 0;
269 double * read_cell_lon = NULL;
270 double * read_cell_lat = NULL;
271 int * read_cell_mask = NULL;
272 double * read_vertex_lon = NULL;
273 double * read_vertex_lat = NULL;
274 int * read_dist_vertex_of_cell = NULL;
275 int * read_dist_cells_of_vertex = NULL;
276
277 if (local_is_io) {
278
279 unsigned long io_proc_idx = ULONG_MAX;
280 for (int i = 0; (i < num_io_ranks) && (io_proc_idx == ULONG_MAX); ++i)
281 if (io_ranks[i] == comm_rank)
282 io_proc_idx = (unsigned long)i;
283
284 // open file
285 int ncid;
286 yac_nc_open(filename, NC_NOWRITE, &ncid);
287
288 // get number of cells and vertices
289 int dim_id;
290 yac_nc_inq_dimid(ncid, "cell", &dim_id);
291 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &ncells));
292 yac_nc_inq_dimid(ncid, "vertex", &dim_id);
293 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &nvertices));
294 yac_nc_inq_dimid(ncid, "nv", &dim_id);
295 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &nv));
296 yac_nc_inq_dimid(ncid, "ne", &dim_id);
297 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &ne));
298
299 // determine local range for cell and vertex data
300 read_local_start_cell =
301 ((unsigned long)ncells * io_proc_idx) / (unsigned long)num_io_ranks;
302 read_num_local_cells =
303 ((unsigned long)ncells * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
304 (unsigned long)read_local_start_cell;
305 read_local_start_vertex =
306 ((unsigned long)nvertices * io_proc_idx) / (unsigned long)num_io_ranks;
307 read_num_local_vertices =
308 ((unsigned long)nvertices * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
309 (unsigned long)read_local_start_vertex;
310
 311 // read basic grid data (each I/O process reads its individual part)
312 read_cell_lon = xmalloc(read_num_local_cells * sizeof(*read_cell_lon));
313 read_cell_lat = xmalloc(read_num_local_cells * sizeof(*read_cell_lat));
314 read_cell_mask = xmalloc(read_num_local_cells * sizeof(*read_cell_mask));
315 read_vertex_lon = xmalloc(read_num_local_vertices * sizeof(*read_vertex_lon));
316 read_vertex_lat = xmalloc(read_num_local_vertices * sizeof(*read_vertex_lat));
317 read_dist_vertex_of_cell =
318 xmalloc(read_num_local_cells * nv * sizeof(*read_dist_vertex_of_cell));
319 read_dist_cells_of_vertex =
320 xmalloc(read_num_local_vertices * ne * sizeof(*read_dist_cells_of_vertex));
321 int varid;
322 yac_nc_inq_varid(ncid, "clon", &varid);
323 YAC_HANDLE_ERROR(nc_get_vara_double(ncid, varid, &read_local_start_cell,
324 &read_num_local_cells, read_cell_lon));
325 convert_to_rad(ncid, varid, read_cell_lon, read_num_local_cells);
326 yac_nc_inq_varid(ncid, "clat", &varid);
327 YAC_HANDLE_ERROR(nc_get_vara_double(ncid, varid, &read_local_start_cell,
328 &read_num_local_cells, read_cell_lat));
329 convert_to_rad(ncid, varid, read_cell_lat, read_num_local_cells);
330 yac_nc_inq_varid(ncid, "cell_sea_land_mask", &varid);
331 YAC_HANDLE_ERROR(nc_get_vara_int(ncid, varid, &read_local_start_cell,
332 &read_num_local_cells, read_cell_mask));
333 yac_nc_inq_varid(ncid, "vlon", &varid);
334 YAC_HANDLE_ERROR(nc_get_vara_double(ncid, varid, &read_local_start_vertex,
335 &read_num_local_vertices, read_vertex_lon));
336 convert_to_rad(ncid, varid, read_vertex_lon, read_num_local_vertices);
337 yac_nc_inq_varid(ncid, "vlat", &varid);
338 YAC_HANDLE_ERROR(nc_get_vara_double(ncid, varid, &read_local_start_vertex,
339 &read_num_local_vertices, read_vertex_lat));
340 convert_to_rad(ncid, varid, read_vertex_lat, read_num_local_vertices);
341 {
342 int * buffer = xmalloc(MAX(nv*read_num_local_cells,
343 ne*read_num_local_vertices) * sizeof(*buffer));
344 {
345 size_t tmp_start[2] = {0, read_local_start_cell};
346 size_t tmp_count[2] = {nv, read_num_local_cells};
347 yac_nc_inq_varid(ncid, "vertex_of_cell", &varid);
348 YAC_HANDLE_ERROR(nc_get_vara_int(ncid, varid, tmp_start, tmp_count, buffer));
349 for (size_t i = 0; i < read_num_local_cells; ++i)
350 for (size_t j = 0; j < nv; ++j)
351 read_dist_vertex_of_cell[i * nv + j] =
352 buffer[i + j * read_num_local_cells];
353 }
354 {
355 size_t tmp_start[2] = {0, read_local_start_vertex};
356 size_t tmp_count[2] = {ne, read_num_local_vertices};
357 yac_nc_inq_varid(ncid, "cells_of_vertex", &varid);
358 YAC_HANDLE_ERROR(nc_get_vara_int(ncid, varid, tmp_start, tmp_count, buffer));
359 for (size_t i = 0; i < read_num_local_vertices; ++i)
360 for (size_t j = 0; j < ne; ++j)
361 read_dist_cells_of_vertex[i * ne + j] =
362 buffer[i + j * read_num_local_vertices];
363 }
364 free(buffer);
365
 366 // adjust to 0-based C indices
367 for (size_t i = 0; i < read_num_local_cells * nv; ++i)
368 if (read_dist_vertex_of_cell[i] > 0) read_dist_vertex_of_cell[i]--;
369 for (size_t i = 0; i < read_num_local_vertices * ne; ++i)
370 read_dist_cells_of_vertex[i]--;
371 }
372
373 YAC_HANDLE_ERROR(nc_close(ncid));
374 } else {
375 read_cell_lon = xmalloc(1 * sizeof(*read_cell_lon));
376 read_cell_lat = xmalloc(1 * sizeof(*read_cell_lat));
377 read_cell_mask = xmalloc(1 * sizeof(*read_cell_mask));
378 read_vertex_lon = xmalloc(1 * sizeof(*read_vertex_lon));
379 read_vertex_lat = xmalloc(1 * sizeof(*read_vertex_lat));
380 read_dist_vertex_of_cell = xmalloc(1 * sizeof(*read_dist_vertex_of_cell));
381 read_dist_cells_of_vertex = xmalloc(1 * sizeof(*read_dist_cells_of_vertex));
382 }
383
384 free(io_ranks);
385
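 // broadcast the grid dimensions, which were read only by the I/O ranks,
 // from rank 0 to all processes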
386 {
387 int tmp;
388 if (comm_rank == 0) tmp = (int)ncells;
389 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
390 ncells = (size_t)tmp;
391 if (comm_rank == 0) tmp = (int)nvertices;
392 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
393 nvertices = (size_t)tmp;
394 if (comm_rank == 0) tmp = (int)nv;
395 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
396 nv = (size_t)tmp;
397 if (comm_rank == 0) tmp = (int)ne;
398 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
399 ne = (size_t)tmp;
400 }
401
402 // determine local range for cell and vertex data
403 size_t local_start_cell =
404 ((unsigned long)ncells * (unsigned long)comm_rank) /
405 (unsigned long)comm_size;
406 size_t num_local_cells =
407 ((unsigned long)ncells * ((unsigned long)comm_rank+1)) /
408 (unsigned long)comm_size - (unsigned long)local_start_cell;
409 size_t local_start_vertex =
410 ((unsigned long)nvertices * (unsigned long)comm_rank) /
411 (unsigned long)comm_size;
412 size_t num_local_vertices =
413 ((unsigned long)nvertices * ((unsigned long)comm_rank+1)) /
414 (unsigned long)comm_size - (unsigned long)local_start_vertex;
415
416 // redistribute basic cell data (from io decomposition)
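 // each I/O rank determines, for every cell it has read, the rank owning that
 // cell in the final block decomposition and exchanges the data accordingly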
417 double * cell_lon = xmalloc(num_local_cells * sizeof(*cell_lon));
418 double * cell_lat = xmalloc(num_local_cells * sizeof(*cell_lat));
419 int * cell_mask = xmalloc(num_local_cells * sizeof(*cell_mask));
420 int * dist_vertex_of_cell =
421 xmalloc(num_local_cells * nv * sizeof(*dist_vertex_of_cell));
422 {
423 int * send_count = xcalloc(comm_size, sizeof(*send_count));
424 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
425
426 for (size_t i = 0; i < read_num_local_cells; ++i)
427 send_count[
 428 partition_idx_from_element_idx(
 429 read_local_start_cell + i, ncells, comm_size)]++;
430
431 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
432
433 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
434 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
435 int send_accum = 0, recv_accum = 0;
436 for (int i = 0; i < comm_size; ++i) {
437 send_displ[i] = send_accum;
438 recv_displ[i] = recv_accum;
439 send_accum += send_count[i];
440 recv_accum += recv_count[i];
441 }
442
443 MPI_Alltoallv(read_cell_lon, send_count, send_displ, MPI_DOUBLE,
444 cell_lon, recv_count, recv_displ, MPI_DOUBLE, comm);
445 MPI_Alltoallv(read_cell_lat, send_count, send_displ, MPI_DOUBLE,
446 cell_lat, recv_count, recv_displ, MPI_DOUBLE, comm);
447 MPI_Alltoallv(read_cell_mask, send_count, send_displ, MPI_INT,
448 cell_mask, recv_count, recv_displ, MPI_INT, comm);
449
450 for (int i = 0; i < comm_size; ++i) {
451 send_count[i] *= nv;
452 send_displ[i] *= nv;
453 recv_count[i] *= nv;
454 recv_displ[i] *= nv;
455 }
456
457 MPI_Alltoallv(read_dist_vertex_of_cell, send_count, send_displ, MPI_INT,
458 dist_vertex_of_cell, recv_count, recv_displ, MPI_INT, comm);
459
460 free(recv_displ);
461 free(send_displ);
462 free(recv_count);
463 free(send_count);
464 free(read_cell_lon);
465 free(read_cell_lat);
466 free(read_cell_mask);
467 free(read_dist_vertex_of_cell);
468 }
469
470 // redistribute basic vertex data (from io decomposition)
471 double * vertex_lon = xmalloc(num_local_vertices * sizeof(*vertex_lon));
472 double * vertex_lat = xmalloc(num_local_vertices * sizeof(*vertex_lat));
473 int * dist_cells_of_vertex =
474 xmalloc(num_local_vertices * ne * sizeof(*dist_cells_of_vertex));
475 {
476 int * send_count = xcalloc(comm_size, sizeof(*send_count));
477 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
478
479 for (size_t i = 0; i < read_num_local_vertices; ++i)
480 send_count[
 481 partition_idx_from_element_idx(
 482 read_local_start_vertex + i, nvertices, comm_size)]++;
483
484 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
485
486 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
487 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
488 int send_accum = 0, recv_accum = 0;
489 for (int i = 0; i < comm_size; ++i) {
490 send_displ[i] = send_accum;
491 recv_displ[i] = recv_accum;
492 send_accum += send_count[i];
493 recv_accum += recv_count[i];
494 }
495
496 MPI_Alltoallv(read_vertex_lon, send_count, send_displ, MPI_DOUBLE,
497 vertex_lon, recv_count, recv_displ, MPI_DOUBLE, comm);
498 MPI_Alltoallv(read_vertex_lat, send_count, send_displ, MPI_DOUBLE,
499 vertex_lat, recv_count, recv_displ, MPI_DOUBLE, comm);
500
501 for (int i = 0; i < comm_size; ++i) {
502 send_count[i] *= ne;
503 send_displ[i] *= ne;
504 recv_count[i] *= ne;
505 recv_displ[i] *= ne;
506 }
507
508 MPI_Alltoallv(read_dist_cells_of_vertex, send_count, send_displ, MPI_INT,
509 dist_cells_of_vertex, recv_count, recv_displ, MPI_INT, comm);
510
511 free(recv_displ);
512 free(send_displ);
513 free(recv_count);
514 free(send_count);
515 free(read_vertex_lon);
516 free(read_vertex_lat);
517 free(read_dist_cells_of_vertex);
518 }
519
520 // determine required vertices for core cells
521 size_t num_core_vertices = num_local_cells * nv;
522 int * core_vertices = xmalloc(num_core_vertices * sizeof(*core_vertices));
523 {
524 memcpy(core_vertices, dist_vertex_of_cell,
525 num_core_vertices * sizeof(*core_vertices));
526 yac_quicksort_index(core_vertices, num_core_vertices, NULL);
527 yac_remove_duplicates_int(core_vertices, &num_core_vertices);
528 core_vertices =
529 xrealloc(core_vertices, num_core_vertices * sizeof(*core_vertices));
530 }
531
532 // get cells_of_vertex for core vertices and compute dist_vertex_owner
533 int * cells_of_vertex_core =
534 xmalloc(num_core_vertices * ne * sizeof(*cells_of_vertex_core));
535 int * dist_vertex_owner =
536 xmalloc(num_local_vertices * sizeof(*dist_vertex_owner));
537 {
538 int * send_count = xcalloc(comm_size, sizeof(*send_count));
539 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
540
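 // owner of global vertex v under the block decomposition:
 // (v * comm_size + comm_size - 1) / nvertices
 // (the same formula as partition_idx_from_element_idx)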
541 for (size_t i = 0; i < num_core_vertices; ++i)
542 send_count[((unsigned long)(core_vertices[i]) *
543 (unsigned long)comm_size + (unsigned long)comm_size - 1) /
544 (unsigned long)nvertices]++;
545
546 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
547
548 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
549 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
550 int send_accum = 0, recv_accum = 0;
551 for (int i = 0; i < comm_size; ++i) {
552 send_displ[i] = send_accum;
553 recv_displ[i] = recv_accum;
554 send_accum += send_count[i];
555 recv_accum += recv_count[i];
556 }
557
558 int num_core_vertices_remote = 0;
559 for (int i = 0; i < comm_size; ++i)
560 num_core_vertices_remote += recv_count[i];
561
562 int * remote_vertex_buffer =
563 xmalloc(num_core_vertices_remote * sizeof(*remote_vertex_buffer));
564
565 MPI_Alltoallv(core_vertices, send_count, send_displ, MPI_INT,
566 remote_vertex_buffer, recv_count, recv_displ, MPI_INT, comm);
567
568 for (size_t i = 0; i < num_local_vertices; ++i) dist_vertex_owner[i] = -1;
569
570 for (int i = 0, j = 0; i < comm_size; ++i)
571 for (int k = 0; k < recv_count[i]; ++k, ++j)
572 dist_vertex_owner[remote_vertex_buffer[j] - local_start_vertex] = i;
573
574 int * send_cell_of_vertex =
575 xmalloc(num_core_vertices_remote * ne * sizeof(*send_cell_of_vertex));
576
577 for (int i = 0, l = 0, m = 0; i < comm_size; ++i) {
578 for (int j = 0; j < recv_count[i]; ++j, ++l) {
579 int idx = remote_vertex_buffer[l] - local_start_vertex;
580 for (size_t k = 0; k < ne; ++k, ++m)
581 send_cell_of_vertex[m] = dist_cells_of_vertex[idx * ne + k];
582 }
583 }
584 free(dist_cells_of_vertex);
585
586 for (int i = 0; i < comm_size; ++i) {
587 send_count[i] *= ne;
588 send_displ[i] *= ne;
589 recv_count[i] *= ne;
590 recv_displ[i] *= ne;
591 }
592
593 MPI_Alltoallv(send_cell_of_vertex, recv_count, recv_displ, MPI_INT,
594 cells_of_vertex_core, send_count, send_displ, MPI_INT, comm);
595
596 free(send_cell_of_vertex);
597 free(remote_vertex_buffer);
598 free(recv_displ);
599 free(send_displ);
600 free(recv_count);
601 free(send_count);
602 }
603
604 // determine halo cells
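 // halo cells share at least one vertex with a local (core) cell but are owned
 // by another rank; they are extracted from cells_of_vertex of the core vertices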
605 int * halo_cells;
606 size_t num_halo_cells = 0;
607 {
608 int * tmp_cells = cells_of_vertex_core;
609 size_t num_tmp_cells = num_core_vertices * ne;
610
611 yac_quicksort_index(tmp_cells, num_tmp_cells, NULL);
612 yac_remove_duplicates_int(tmp_cells, &num_tmp_cells);
613
614 if ((num_tmp_cells > 0) && (tmp_cells[0] == -1)) {
615 num_tmp_cells--;
616 tmp_cells++;
617 }
618
619 halo_cells = xmalloc((num_tmp_cells - num_local_cells) * sizeof(*halo_cells));
620
621 size_t i = 0;
622 for (; i < num_tmp_cells && tmp_cells[i] < (int)local_start_cell; ++i)
623 halo_cells[num_halo_cells++] = tmp_cells[i];
624 i += num_local_cells;
625 for (; i < num_tmp_cells; ++i) halo_cells[num_halo_cells++] = tmp_cells[i];
626
627 assert(num_halo_cells == num_tmp_cells - num_local_cells);
628
629 free(cells_of_vertex_core);
630 }
631
632 // determine all vertices and get coordinates of halo cells
633 size_t num_all_local_vertices = num_halo_cells * nv + num_core_vertices;
634 int * all_cell_to_vertex;
635 int * all_local_vertices = xrealloc(core_vertices, num_all_local_vertices *
636 sizeof(*all_local_vertices));
637 cell_lon = xrealloc(cell_lon, (num_local_cells + num_halo_cells) *
638 sizeof(*cell_lon));
639 cell_lat = xrealloc(cell_lat, (num_local_cells + num_halo_cells) *
640 sizeof(*cell_lat));
641 cell_mask = xrealloc(cell_mask, (num_local_cells + num_halo_cells) *
642 sizeof(*cell_mask));
643 {
644 int * send_count = xcalloc(comm_size, sizeof(*send_count));
645 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
646
647 for (size_t i = 0; i < num_halo_cells; ++i)
648 send_count[((unsigned long)(halo_cells[i]) *
649 (unsigned long)comm_size + (unsigned long)comm_size - 1) /
650 (unsigned long)ncells]++;
651
652 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
653
654 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
655 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
656 int send_accum = 0, recv_accum = 0;
657 for (int i = 0; i < comm_size; ++i) {
658 send_displ[i] = send_accum;
659 recv_displ[i] = recv_accum;
660 send_accum += send_count[i];
661 recv_accum += recv_count[i];
662 }
663
664 int num_halo_cells_remote = 0;
665 for (int i = 0; i < comm_size; ++i)
666 num_halo_cells_remote += recv_count[i];
667
668 int * remote_halo_cell_buffer =
669 xmalloc(num_halo_cells_remote * sizeof(*remote_halo_cell_buffer));
670
671 MPI_Alltoallv(halo_cells, send_count, send_displ, MPI_INT,
672 remote_halo_cell_buffer, recv_count, recv_displ, MPI_INT,
673 comm);
674
675 int * send_halo_cell_vertices =
676 xmalloc(num_halo_cells_remote * nv * sizeof(*send_halo_cell_vertices));
677 double * send_cell_lon =
678 xmalloc(num_halo_cells_remote * sizeof(*send_cell_lon));
679 double * send_cell_lat =
680 xmalloc(num_halo_cells_remote * sizeof(*send_cell_lat));
681 int * send_cell_mask =
682 xmalloc(num_halo_cells_remote * sizeof(*send_cell_mask));
683
684 for (int i = 0, l = 0, m = 0; i < comm_size; ++i) {
685 for (int j = 0; j < recv_count[i]; ++j, ++l) {
686 int idx = remote_halo_cell_buffer[l] - local_start_cell;
687 for (size_t k = 0; k < nv; ++k, ++m)
688 send_halo_cell_vertices[m] = dist_vertex_of_cell[idx * nv + k];
689 send_cell_lon[l] = cell_lon[idx];
690 send_cell_lat[l] = cell_lat[idx];
691 send_cell_mask[l] = cell_mask[idx];
692 }
693 }
694
695 MPI_Alltoallv(send_cell_lon, recv_count, recv_displ, MPI_DOUBLE,
696 cell_lon + num_local_cells, send_count, send_displ,
697 MPI_DOUBLE, comm);
698 MPI_Alltoallv(send_cell_lat, recv_count, recv_displ, MPI_DOUBLE,
699 cell_lat + num_local_cells, send_count, send_displ,
700 MPI_DOUBLE, comm);
701 MPI_Alltoallv(send_cell_mask, recv_count, recv_displ, MPI_INT,
702 cell_mask + num_local_cells, send_count, send_displ,
703 MPI_INT, comm);
704
705 for (int i = 0; i < comm_size; ++i) {
706 send_count[i] *= nv;
707 send_displ[i] *= nv;
708 recv_count[i] *= nv;
709 recv_displ[i] *= nv;
710 }
711
712 all_cell_to_vertex =
713 xrealloc(dist_vertex_of_cell, (num_local_cells + num_halo_cells) * nv *
714 sizeof(*all_cell_to_vertex));
715
716 MPI_Alltoallv(send_halo_cell_vertices, recv_count, recv_displ, MPI_INT,
717 all_cell_to_vertex + num_local_cells * nv, send_count,
718 send_displ, MPI_INT, comm);
719
720 memcpy(all_local_vertices + num_core_vertices,
721 all_cell_to_vertex + num_local_cells * nv,
722 num_halo_cells * nv * sizeof(*all_local_vertices));
723
724 free(send_cell_mask);
725 free(send_cell_lat);
726 free(send_cell_lon);
727 free(send_halo_cell_vertices);
728 free(remote_halo_cell_buffer);
729 free(recv_displ);
730 free(send_displ);
731 free(recv_count);
732 free(send_count);
733
 734 yac_quicksort_index(
 735 all_local_vertices, num_all_local_vertices, NULL);
736 yac_remove_duplicates_int(all_local_vertices, &num_all_local_vertices);
737 }
738
739 // determine owner and coordinates for all vertices
740 double * all_vertex_lon =
741 xmalloc(num_all_local_vertices * sizeof(*all_vertex_lon));
742 double * all_vertex_lat =
743 xmalloc(num_all_local_vertices * sizeof(*all_vertex_lat));
744 int * all_local_vertices_owner =
745 xmalloc(num_all_local_vertices * sizeof(*all_local_vertices_owner));
746 {
747 int * send_count = xcalloc(comm_size, sizeof(*send_count));
748 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
749
750 for (size_t i = 0; i < num_all_local_vertices; ++i)
751 send_count[((unsigned long)(all_local_vertices[i]) *
752 (unsigned long)comm_size + (unsigned long)comm_size - 1) /
753 (unsigned long)nvertices]++;
754
755 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
756
757 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
758 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
759 int send_accum = 0, recv_accum = 0;
760 for (int i = 0; i < comm_size; ++i) {
761 send_displ[i] = send_accum;
762 recv_displ[i] = recv_accum;
763 send_accum += send_count[i];
764 recv_accum += recv_count[i];
765 }
766
767 int num_all_local_vertices_remote = 0;
768 for (int i = 0; i < comm_size; ++i)
769 num_all_local_vertices_remote += recv_count[i];
770
771 int * remote_vertex_buffer =
772 xmalloc(num_all_local_vertices_remote * sizeof(*remote_vertex_buffer));
773
774 MPI_Alltoallv(all_local_vertices, send_count, send_displ, MPI_INT,
775 remote_vertex_buffer, recv_count, recv_displ, MPI_INT, comm);
776
777 int * send_vertex_owner = remote_vertex_buffer;
778 double * send_vertex_lon =
779 xmalloc(num_all_local_vertices_remote * sizeof(*send_vertex_lon));
780 double * send_vertex_lat =
781 xmalloc(num_all_local_vertices_remote * sizeof(*send_vertex_lat));
782
783 for (int i = 0, l = 0; i < comm_size; ++i) {
784 for (int j = 0; j < recv_count[i]; ++j, ++l) {
785 int idx = remote_vertex_buffer[l] - local_start_vertex;
786 send_vertex_owner[l] = dist_vertex_owner[idx];
787 send_vertex_lon[l] = vertex_lon[idx];
788 send_vertex_lat[l] = vertex_lat[idx];
789 }
790 }
791
792 free(dist_vertex_owner);
793 free(vertex_lon);
794 free(vertex_lat);
795
796 MPI_Alltoallv(send_vertex_owner, recv_count, recv_displ, MPI_INT,
797 all_local_vertices_owner, send_count, send_displ, MPI_INT,
798 comm);
799 MPI_Alltoallv(send_vertex_lon, recv_count, recv_displ, MPI_DOUBLE,
800 all_vertex_lon, send_count, send_displ, MPI_DOUBLE, comm);
801 MPI_Alltoallv(send_vertex_lat, recv_count, recv_displ, MPI_DOUBLE,
802 all_vertex_lat, send_count, send_displ, MPI_DOUBLE, comm);
803
804 free(send_vertex_lat);
805 free(send_vertex_lon);
806 free(send_vertex_owner);
807 free(recv_displ);
808 free(send_displ);
809 free(recv_count);
810 free(send_count);
811 }
812
813 // convert global ids within all_cell_to_vertex into local ids
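 // sort the connectivity, translate each global vertex id into its position in
 // the sorted list of local vertices, then restore the original order via the
 // recorded permutation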
814 {
815 size_t vertex_count = nv * (num_local_cells + num_halo_cells);
816 int * permutation = xmalloc(vertex_count * sizeof(*permutation));
817 for (size_t i = 0; i < vertex_count; ++i) permutation[i] = (int)i;
818
819 yac_quicksort_index(all_cell_to_vertex, vertex_count, permutation);
820
821 for (size_t i = 0, j = 0; i < vertex_count; ++i) {
822 while (all_local_vertices[j] != all_cell_to_vertex[i]) ++j;
823 all_cell_to_vertex[i] = (int)j;
824 }
825
826 yac_quicksort_index(permutation, vertex_count, all_cell_to_vertex);
827 free(permutation);
828 }
829
830 *nbr_vertices = num_all_local_vertices;
831 *nbr_cells = num_local_cells + num_halo_cells;
832
833 *num_vertices_per_cell =
834 xmalloc((num_local_cells + num_halo_cells) * sizeof(**num_vertices_per_cell));
835 for (size_t i = 0; i < num_local_cells + num_halo_cells; ++i)
836 (*num_vertices_per_cell)[i] = nv;
837
838 *cell_to_vertex = all_cell_to_vertex;
839
840 *global_cell_ids =
841 xmalloc((num_local_cells + num_halo_cells) * sizeof(**global_cell_ids));
842 for (size_t i = 0; i < num_local_cells; ++i)
843 (*global_cell_ids)[i] = local_start_cell + i;
844 memcpy(*global_cell_ids + num_local_cells, halo_cells,
845 num_halo_cells * sizeof(*halo_cells));
846 *global_vertex_ids = xrealloc(all_local_vertices, num_all_local_vertices *
847 sizeof(*all_local_vertices));
848
849 *cell_owner =
850 xmalloc((num_local_cells + num_halo_cells) * sizeof(**cell_owner));
851 for (size_t i = 0; i < num_local_cells; ++i)
852 (*cell_owner)[i] = -1;
853 for (size_t i = 0; i < num_halo_cells; ++i)
854 (*cell_owner)[num_local_cells + i] =
855 ((unsigned long)(halo_cells[i]) * (unsigned long)comm_size +
856 (unsigned long)comm_size - 1) / (unsigned long)ncells;
857 free(halo_cells);
858
859 for (size_t i = 0; i < num_all_local_vertices; ++i)
860 if (all_local_vertices_owner[i] == comm_rank)
861 all_local_vertices_owner[i] = -1;
862 *vertex_owner = all_local_vertices_owner;
863
864 *x_vertices = all_vertex_lon;
865 *y_vertices = all_vertex_lat;
866 *x_cells = cell_lon;
867 *y_cells = cell_lat;
868 *cell_msk = cell_mask;
869}
870
871 void yac_read_icon_grid_information_parallel_f2c(
 872 const char * filename, MPI_Fint comm, int * nbr_vertices, int * nbr_cells,
873 int ** num_vertices_per_cell, int ** cell_to_vertex, int ** global_cell_ids,
874 int ** cell_owner, int ** global_vertex_ids, int ** vertex_owner,
875 double ** x_vertices, double ** y_vertices,
876 double ** x_cells, double ** y_cells, int ** cell_msk) {
877
 878 yac_read_icon_grid_information_parallel(
 879 filename, MPI_Comm_f2c(comm), nbr_vertices, nbr_cells,
880 num_vertices_per_cell, cell_to_vertex, global_cell_ids,
881 cell_owner, global_vertex_ids, vertex_owner,
882 x_vertices, y_vertices, x_cells, y_cells, cell_msk);
883}
884
885static int * generate_simple_core_mask(size_t N) {
886 int * mask = xmalloc(N * sizeof(*mask));
887 for (size_t i = 0; i < N; ++i) mask[i] = 1;
888 return mask;
889}
890
891static size_t * generate_offsets(size_t N, int * counts) {
892
893 size_t * offsets = xmalloc(N * sizeof(*offsets));
894 for (size_t i = 0, accu = 0; i < N; ++i) {
895 offsets[i] = accu;
896 accu += (size_t)(counts[i]);
897 }
898 return offsets;
899}
900
901 struct yac_basic_grid_data yac_read_icon_basic_grid_data_parallel(
 902 const char * filename, MPI_Comm comm) {
903
904 int comm_rank, comm_size;
905
906 MPI_Comm_rank(comm, &comm_rank);
907 MPI_Comm_size(comm, &comm_size);
908
909 int local_is_io, * io_ranks, num_io_ranks;
910 yac_get_io_ranks(comm, &local_is_io, &io_ranks, &num_io_ranks);
911
912 size_t ncells, nvertices, nedges, nv, ne;
913
914 size_t read_local_start_cell = 0;
915 size_t read_num_local_cells = 0;
916 size_t read_local_start_vertex = 0;
917 size_t read_num_local_vertices = 0;
918 size_t read_local_start_edge = 0;
919 size_t read_num_local_edges = 0;
920 double * read_vertex_lon = NULL;
921 double * read_vertex_lat = NULL;
922 int * read_dist_vertex_of_cell = NULL;
923 int * read_dist_edge_of_cell = NULL;
924 int * read_dist_edge_vertices = NULL;
925
926 if (local_is_io) {
927
928 unsigned long io_proc_idx = ULONG_MAX;
929 for (int i = 0; (i < num_io_ranks) && (io_proc_idx == ULONG_MAX); ++i)
930 if (io_ranks[i] == comm_rank)
931 io_proc_idx = (unsigned long)i;
932
933 // open file
934 int ncid;
935 yac_nc_open(filename, NC_NOWRITE, &ncid);
936
937 // get number of cells and vertices
938 int dim_id;
939 yac_nc_inq_dimid(ncid, "cell", &dim_id);
940 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &ncells));
941 yac_nc_inq_dimid(ncid, "vertex", &dim_id);
942 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &nvertices));
943 yac_nc_inq_dimid(ncid, "edge", &dim_id);
944 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &nedges));
945 yac_nc_inq_dimid(ncid, "nv", &dim_id);
946 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &nv));
947 yac_nc_inq_dimid(ncid, "ne", &dim_id);
948 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dim_id, &ne));
949
950 // determine local range for cell and vertex data
951 read_local_start_cell =
952 ((unsigned long)ncells * io_proc_idx) / (unsigned long)num_io_ranks;
953 read_num_local_cells =
954 ((unsigned long)ncells * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
955 (unsigned long)read_local_start_cell;
956 read_local_start_vertex =
957 ((unsigned long)nvertices * io_proc_idx) / (unsigned long)num_io_ranks;
958 read_num_local_vertices =
959 ((unsigned long)nvertices * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
960 (unsigned long)read_local_start_vertex;
961 read_local_start_edge =
962 ((unsigned long)nedges * io_proc_idx) / (unsigned long)num_io_ranks;
963 read_num_local_edges =
964 ((unsigned long)nedges * (io_proc_idx+1)) / (unsigned long)num_io_ranks -
965 (unsigned long)read_local_start_edge;
966
 967 // read basic grid data (each I/O process reads its individual part)
968 read_vertex_lon = xmalloc(read_num_local_vertices * sizeof(*read_vertex_lon));
969 read_vertex_lat = xmalloc(read_num_local_vertices * sizeof(*read_vertex_lat));
970 read_dist_vertex_of_cell =
971 xmalloc(read_num_local_cells * nv * sizeof(*read_dist_vertex_of_cell));
972 read_dist_edge_of_cell =
973 xmalloc(read_num_local_cells * nv * sizeof(*read_dist_edge_of_cell));
974 read_dist_edge_vertices =
975 xmalloc(read_num_local_edges * 2 * sizeof(*read_dist_edge_vertices));
976 int varid;
977 yac_nc_inq_varid(ncid, "vlon", &varid);
978 YAC_HANDLE_ERROR(nc_get_vara_double(ncid, varid, &read_local_start_vertex,
979 &read_num_local_vertices, read_vertex_lon));
980 convert_to_rad(ncid, varid, read_vertex_lon, read_num_local_vertices);
981 yac_nc_inq_varid(ncid, "vlat", &varid);
982 YAC_HANDLE_ERROR(nc_get_vara_double(ncid, varid, &read_local_start_vertex,
983 &read_num_local_vertices, read_vertex_lat));
984 convert_to_rad(ncid, varid, read_vertex_lat, read_num_local_vertices);
985 {
986 int * buffer =
987 xmalloc(
988 MAX(nv*read_num_local_cells, 2*read_num_local_edges) *
989 sizeof(*buffer));
990 {
991 size_t tmp_start[2] = {0, read_local_start_cell};
992 size_t tmp_count[2] = {nv, read_num_local_cells};
993 yac_nc_inq_varid(ncid, "vertex_of_cell", &varid);
994 YAC_HANDLE_ERROR(nc_get_vara_int(ncid, varid, tmp_start, tmp_count, buffer));
995 for (size_t i = 0; i < read_num_local_cells; ++i)
996 for (size_t j = 0; j < nv; ++j)
997 read_dist_vertex_of_cell[i * nv + j] =
998 buffer[i + j * read_num_local_cells];
999 yac_nc_inq_varid(ncid, "edge_of_cell", &varid);
1000 YAC_HANDLE_ERROR(nc_get_vara_int(ncid, varid, tmp_start, tmp_count, buffer));
1001 for (size_t i = 0; i < read_num_local_cells; ++i)
1002 for (size_t j = 0; j < nv; ++j)
1003 read_dist_edge_of_cell[i * nv + j] =
1004 buffer[i + j * read_num_local_cells];
1005 }
1006 {
1007 size_t tmp_start[2] = {0, read_local_start_edge};
1008 size_t tmp_count[2] = {2, read_num_local_edges};
1009 yac_nc_inq_varid(ncid, "edge_vertices", &varid);
1010 YAC_HANDLE_ERROR(nc_get_vara_int(ncid, varid, tmp_start, tmp_count, buffer));
1011 for (size_t i = 0; i < read_num_local_edges; ++i)
1012 for (int j = 0; j < 2; ++j)
1013 read_dist_edge_vertices[i * 2 + j] =
1014 buffer[i + j * read_num_local_edges];
1015 }
1016 free(buffer);
1017
 1018 // adjust to 0-based C indices
1019 for (size_t i = 0; i < read_num_local_cells * nv; ++i)
1020 if (read_dist_vertex_of_cell[i] > 0) read_dist_vertex_of_cell[i]--;
1021 for (size_t i = 0; i < read_num_local_cells * nv; ++i)
1022 if (read_dist_edge_of_cell[i] > 0) read_dist_edge_of_cell[i]--;
1023 for (size_t i = 0; i < read_num_local_edges * 2; ++i)
1024 if (read_dist_edge_vertices[i] > 0) read_dist_edge_vertices[i]--;
1025 }
1026
1027 YAC_HANDLE_ERROR(nc_close(ncid));
1028 } else {
1029 read_vertex_lon = xmalloc(1 * sizeof(*read_vertex_lon));
1030 read_vertex_lat = xmalloc(1 * sizeof(*read_vertex_lat));
1031 read_dist_vertex_of_cell = xmalloc(1 * sizeof(*read_dist_vertex_of_cell));
1032 read_dist_edge_of_cell = xmalloc(1 * sizeof(*read_dist_edge_of_cell));
1033 read_dist_edge_vertices = xmalloc(1 * sizeof(*read_dist_edge_vertices));
1034 }
1035
1036 free(io_ranks);
1037
1038 {
1039 int tmp;
1040 if (comm_rank == 0) tmp = (int)ncells;
1041 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
1042 ncells = (size_t)tmp;
1043 if (comm_rank == 0) tmp = (int)nvertices;
1044 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
1045 nvertices = (size_t)tmp;
1046 if (comm_rank == 0) tmp = (int)nedges;
1047 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
1048 nedges = (size_t)tmp;
1049 if (comm_rank == 0) tmp = (int)nv;
1050 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
1051 nv = (size_t)tmp;
1052 if (comm_rank == 0) tmp = (int)ne;
1053 MPI_Bcast(&tmp, 1, MPI_INT, 0, comm);
1054 ne = (size_t)tmp;
1055 }
1056
1057 // determine local range for cell and vertex data
1058 size_t local_start_cell =
1059 ((unsigned long)ncells * (unsigned long)comm_rank) /
1060 (unsigned long)comm_size;
1061 size_t num_local_cells =
1062 ((unsigned long)ncells * ((unsigned long)comm_rank+1)) /
1063 (unsigned long)comm_size - (unsigned long)local_start_cell;
1064 size_t local_start_vertex =
1065 ((unsigned long)nvertices * (unsigned long)comm_rank) /
1066 (unsigned long)comm_size;
1067 size_t num_local_vertices =
1068 ((unsigned long)nvertices * ((unsigned long)comm_rank+1)) /
1069 (unsigned long)comm_size - (unsigned long)local_start_vertex;
1070 size_t local_start_edge =
1071 ((unsigned long)nedges * (unsigned long)comm_rank) /
1072 (unsigned long)comm_size;
1073 size_t num_local_edges =
1074 ((unsigned long)nedges * ((unsigned long)comm_rank+1)) /
1075 (unsigned long)comm_size - (unsigned long)local_start_edge;
1076
1077 // redistribute basic cell data (from io decomposition)
1078 int * dist_vertex_of_cell =
1079 xmalloc(num_local_cells * nv * sizeof(*dist_vertex_of_cell));
1080 int * dist_edge_of_cell =
1081 xmalloc(num_local_cells * nv * sizeof(*dist_edge_of_cell));
1082 {
1083 int * send_count = xcalloc(comm_size, sizeof(*send_count));
1084 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
1085
1086 for (unsigned i = 0; i < read_num_local_cells; ++i)
1087 send_count[
 1088 partition_idx_from_element_idx(
 1089 read_local_start_cell + i, ncells, comm_size)] += nv;
1090
1091 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
1092
1093 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
1094 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
1095 int send_accum = 0, recv_accum = 0;
1096 for (int i = 0; i < comm_size; ++i) {
1097 send_displ[i] = send_accum;
1098 recv_displ[i] = recv_accum;
1099 send_accum += send_count[i];
1100 recv_accum += recv_count[i];
1101 }
1102
1103 MPI_Alltoallv(read_dist_vertex_of_cell, send_count, send_displ, MPI_INT,
1104 dist_vertex_of_cell, recv_count, recv_displ, MPI_INT, comm);
1105 MPI_Alltoallv(read_dist_edge_of_cell, send_count, send_displ, MPI_INT,
1106 dist_edge_of_cell, recv_count, recv_displ, MPI_INT, comm);
1107
1108 free(recv_displ);
1109 free(send_displ);
1110 free(recv_count);
1111 free(send_count);
1112 free(read_dist_vertex_of_cell);
1113 free(read_dist_edge_of_cell);
1114 }
1115
1116 // redistribute basic vertex data (from io decomposition)
1117 double * vertex_lon = xmalloc(num_local_vertices * sizeof(*vertex_lon));
1118 double * vertex_lat = xmalloc(num_local_vertices * sizeof(*vertex_lat));
1119 {
1120 int * send_count = xcalloc(comm_size, sizeof(*send_count));
1121 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
1122
1123 for (unsigned i = 0; i < read_num_local_vertices; ++i)
1124 send_count[
 1125 partition_idx_from_element_idx(
 1126 read_local_start_vertex + i, nvertices, comm_size)]++;
1127
1128 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
1129
1130 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
1131 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
1132 int send_accum = 0, recv_accum = 0;
1133 for (int i = 0; i < comm_size; ++i) {
1134 send_displ[i] = send_accum;
1135 recv_displ[i] = recv_accum;
1136 send_accum += send_count[i];
1137 recv_accum += recv_count[i];
1138 }
1139
1140 MPI_Alltoallv(read_vertex_lon, send_count, send_displ, MPI_DOUBLE,
1141 vertex_lon, recv_count, recv_displ, MPI_DOUBLE, comm);
1142 MPI_Alltoallv(read_vertex_lat, send_count, send_displ, MPI_DOUBLE,
1143 vertex_lat, recv_count, recv_displ, MPI_DOUBLE, comm);
1144
1145 free(recv_displ);
1146 free(send_displ);
1147 free(recv_count);
1148 free(send_count);
1149 free(read_vertex_lon);
1150 free(read_vertex_lat);
1151 }
1152
1153 // redistribute basic edge data (from io decomposition)
1154 int * dist_edge_vertices =
1155 xmalloc(num_local_edges * 2 * sizeof(*dist_edge_vertices));
1156 {
1157 int * send_count = xcalloc(comm_size, sizeof(*send_count));
1158 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
1159
1160 for (unsigned i = 0; i < read_num_local_edges; ++i)
1161 send_count[
 1162 partition_idx_from_element_idx(
 1163 read_local_start_edge + i, nedges, comm_size)] += 2;
1164
1165 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
1166
1167 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
1168 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
1169 int send_accum = 0, recv_accum = 0;
1170 for (int i = 0; i < comm_size; ++i) {
1171 send_displ[i] = send_accum;
1172 recv_displ[i] = recv_accum;
1173 send_accum += send_count[i];
1174 recv_accum += recv_count[i];
1175 }
1176
1177 MPI_Alltoallv(read_dist_edge_vertices, send_count, send_displ, MPI_INT,
1178 dist_edge_vertices, recv_count, recv_displ, MPI_INT, comm);
1179
1180 free(recv_displ);
1181 free(send_displ);
1182 free(recv_count);
1183 free(send_count);
1184 free(read_dist_edge_vertices);
1185 }
1186
1187 // determine required vertices for core cells
 1188 // in addition, compute num_cells_per_vertex, vertex_to_cell,
1189 // and cell_to_vertex
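 // the global vertex indices of all cell corners are sorted and compressed;
 // cell_to_vertex stores, for each corner, the position of its vertex in the
 // compressed local vertex list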
1190 size_t num_core_vertices;
1191 yac_int * core_vertices;
 1192 int * num_cells_per_vertex;
 1193 size_t * vertex_to_cell;
1194 size_t * cell_to_vertex;
1195 {
1196 size_t N = num_local_cells * nv;
1197 core_vertices = xmalloc(N * sizeof(*core_vertices));
 1198 num_cells_per_vertex = xmalloc(N * sizeof(*num_cells_per_vertex));
 1199 vertex_to_cell = xmalloc(N * sizeof(*vertex_to_cell));
1200 cell_to_vertex = xmalloc(N * sizeof(*cell_to_vertex));
1201 for (size_t i = 0; i < N; ++i)
1202 core_vertices[i] = (yac_int)dist_vertex_of_cell[i];
1203 size_t * permutation = vertex_to_cell;
1204 for (size_t i = 0; i < N; ++i) permutation[i] = i;
1205 yac_quicksort_index_yac_int_size_t(core_vertices, N, permutation);
1206 // remove duplicated core vertices and count number of cells per vertex
1207 yac_int prev_vertex_id = XT_INT_MAX;
1208 num_core_vertices = 0;
1209 for (size_t i = 0; i < N; ++i) {
1210 yac_int curr_vertex_id = core_vertices[i];
1211 if (prev_vertex_id == curr_vertex_id) {
1212 num_cells_per_vertex[num_core_vertices-1]++;
1213 } else {
1214 num_cells_per_vertex[num_core_vertices] = 1;
1215 core_vertices[num_core_vertices] = (prev_vertex_id = curr_vertex_id);
1216 ++num_core_vertices;
1217 }
1218 cell_to_vertex[permutation[i]] = num_core_vertices-1;
1219 permutation[i] /= nv;
1220 }
1221 core_vertices =
1222 xrealloc(core_vertices, num_core_vertices * sizeof(*core_vertices));
 1223 num_cells_per_vertex =
 1224 xrealloc(num_cells_per_vertex,
 1225 num_core_vertices * sizeof(*num_cells_per_vertex));
1226 free(dist_vertex_of_cell);
1227 }
1228
1229 // determine required edges for core cells
 1230 // in addition, compute cell_to_edge
1231 size_t num_core_edges;
1232 yac_int * core_edges;
1233 size_t * cell_to_edge;
1234 {
1235 size_t N = num_local_cells * nv;
1236 core_edges = xmalloc(N * sizeof(*core_edges));
1237 size_t * permutation = xmalloc(N * sizeof(*permutation));
1238 cell_to_edge = xmalloc(N * sizeof(*cell_to_edge));
1239 for (size_t i = 0; i < N; ++i)
1240 core_edges[i] = (yac_int)dist_edge_of_cell[i];
1241 for (size_t i = 0; i < N; ++i) permutation[i] = i;
1242 yac_quicksort_index_yac_int_size_t(core_edges, N, permutation);
1243 // remove duplicated core edges and count number of cells per edge
1244 yac_int prev_edge_id = XT_INT_MAX;
1245 num_core_edges = 0;
1246 for (size_t i = 0; i < N; ++i) {
1247 yac_int curr_edge_id = core_edges[i];
1248 if (prev_edge_id != curr_edge_id)
1249 core_edges[num_core_edges++] = (prev_edge_id = curr_edge_id);
1250 cell_to_edge[permutation[i]] = num_core_edges-1;
1251 permutation[i] /= nv;
1252 }
1253 free(permutation);
1254 core_edges =
1255 xrealloc(core_edges, num_core_edges * sizeof(*core_edges));
1256 free(dist_edge_of_cell);
1257 }
1258
1259 // generate vertex coordinate data
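 // fetch lon/lat of the core vertices from their owners in the block
 // decomposition and convert them to 3D coordinates on the unit sphere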
 1260 yac_coordinate_pointer vertex_coordinates =
 1261 xmalloc(num_core_vertices * sizeof(*vertex_coordinates));
1262 {
1263 double * local_vertex_lon =
1264 xmalloc(num_core_vertices * sizeof(*local_vertex_lon));
1265 double * local_vertex_lat =
1266 xmalloc(num_core_vertices * sizeof(*local_vertex_lat));
1267 int * send_count = xcalloc(comm_size, sizeof(*send_count));
1268 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
1269
1270 for (size_t i = 0; i < num_core_vertices; ++i)
1271 send_count[((unsigned long)(core_vertices[i]) *
1272 (unsigned long)comm_size + (unsigned long)comm_size - 1) /
1273 (unsigned long)nvertices]++;
1274
1275 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
1276
1277 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
1278 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
1279 int send_accum = 0, recv_accum = 0;
1280 for (int i = 0; i < comm_size; ++i) {
1281 send_displ[i] = send_accum;
1282 recv_displ[i] = recv_accum;
1283 send_accum += send_count[i];
1284 recv_accum += recv_count[i];
1285 }
1286
1287 int num_all_local_vertices_remote = 0;
1288 for (int i = 0; i < comm_size; ++i)
1289 num_all_local_vertices_remote += recv_count[i];
1290
1291 yac_int * remote_vertex_buffer =
1292 xmalloc(num_all_local_vertices_remote * sizeof(*remote_vertex_buffer));
1293
1294 MPI_Alltoallv(
1295 core_vertices, send_count, send_displ, yac_int_dt,
1296 remote_vertex_buffer, recv_count, recv_displ, yac_int_dt, comm);
1297
1298 double * send_vertex_lon =
1299 xmalloc(num_all_local_vertices_remote * sizeof(*send_vertex_lon));
1300 double * send_vertex_lat =
1301 xmalloc(num_all_local_vertices_remote * sizeof(*send_vertex_lat));
1302
1303 for (int i = 0, l = 0; i < comm_size; ++i) {
1304 for (int j = 0; j < recv_count[i]; ++j, ++l) {
1305 size_t idx = (size_t)(remote_vertex_buffer[l]) - local_start_vertex;
1306 send_vertex_lon[l] = vertex_lon[idx];
1307 send_vertex_lat[l] = vertex_lat[idx];
1308 }
1309 }
1310
1311 free(remote_vertex_buffer);
1312 free(vertex_lon);
1313 free(vertex_lat);
1314
1315 MPI_Alltoallv(send_vertex_lon, recv_count, recv_displ, MPI_DOUBLE,
1316 local_vertex_lon, send_count, send_displ, MPI_DOUBLE, comm);
1317 MPI_Alltoallv(send_vertex_lat, recv_count, recv_displ, MPI_DOUBLE,
1318 local_vertex_lat, send_count, send_displ, MPI_DOUBLE, comm);
1319
1320 for (unsigned i = 0; i < num_core_vertices; ++i)
1321 LLtoXYZ(
1322 local_vertex_lon[i], local_vertex_lat[i], &(vertex_coordinates[i][0]));
1323
1324 free(send_vertex_lat);
1325 free(send_vertex_lon);
1326 free(recv_displ);
1327 free(send_displ);
1328 free(recv_count);
1329 free(send_count);
1330 free(local_vertex_lon);
1331 free(local_vertex_lat);
1332 }
1333
1334 // generate edge vertex data
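 // fetch the two global vertex ids of each core edge from the edge's owner and
 // translate them into positions within the core vertex list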
1335 size_t * edge_to_vertex =
1336 xmalloc(2 * num_core_edges * sizeof(*edge_to_vertex));
1337 {
1338 int * local_edge_to_vertex =
1339 xmalloc(2 * num_core_edges * sizeof(*local_edge_to_vertex));
1340 int * send_count = xcalloc(comm_size, sizeof(*send_count));
1341 int * recv_count = xcalloc(comm_size, sizeof(*recv_count));
1342
1343 for (size_t i = 0; i < num_core_edges; ++i)
1344 send_count[((unsigned long)(core_edges[i]) *
1345 (unsigned long)comm_size + (unsigned long)comm_size - 1) /
1346 (unsigned long)nedges]++;
1347
1348 MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
1349
1350 int * send_displ = xmalloc(comm_size * sizeof(*send_displ));
1351 int * recv_displ = xmalloc(comm_size * sizeof(*recv_displ));
1352 int send_accum = 0, recv_accum = 0;
1353 for (int i = 0; i < comm_size; ++i) {
1354 send_displ[i] = send_accum;
1355 recv_displ[i] = recv_accum;
1356 send_accum += send_count[i];
1357 recv_accum += recv_count[i];
1358 }
1359
1360 int num_all_local_edges_remote = 0;
1361 for (int i = 0; i < comm_size; ++i)
1362 num_all_local_edges_remote += recv_count[i];
1363
1364 yac_int * remote_edge_buffer =
1365 xmalloc(num_all_local_edges_remote * sizeof(*remote_edge_buffer));
1366
1367 MPI_Alltoallv(
1368 core_edges, send_count, send_displ, yac_int_dt,
1369 remote_edge_buffer, recv_count, recv_displ, yac_int_dt, comm);
1370
1371 int * send_edge_vertices =
1372 xmalloc(2 * num_all_local_edges_remote * sizeof(*send_edge_vertices));
1373
1374 for (int i = 0, l = 0; i < comm_size; ++i) {
1375 for (int j = 0; j < recv_count[i]; ++j, ++l) {
1376 size_t idx = (size_t)(remote_edge_buffer[l]) - local_start_edge;
1377 send_edge_vertices[2*l+0] = dist_edge_vertices[2*idx+0];
1378 send_edge_vertices[2*l+1] = dist_edge_vertices[2*idx+1];
1379
1380 }
1381 send_count[i] *= 2;
1382 recv_count[i] *= 2;
1383 send_displ[i] *= 2;
1384 recv_displ[i] *= 2;
1385 }
1386
1387 free(remote_edge_buffer);
1388 free(dist_edge_vertices);
1389
1390 MPI_Alltoallv(send_edge_vertices, recv_count, recv_displ, MPI_INT,
1391 local_edge_to_vertex, send_count, send_displ, MPI_INT, comm);
1392
1393 size_t * permutation = xmalloc(2 * num_core_edges * sizeof(*permutation));
1394 for (size_t i = 0; i < 2 * num_core_edges; ++i) permutation[i] = i;
1395
 1396 yac_quicksort_index_int_size_t(
 1397 local_edge_to_vertex, 2 * num_core_edges, permutation);
1398
1399 for (size_t i = 0, j = 0; i < 2 * num_core_edges; ++i) {
1400 yac_int curr_vertex = (yac_int)(local_edge_to_vertex[i]);
1401 while ((j < nvertices) && (core_vertices[j] < curr_vertex)) ++j;
1402 YAC_ASSERT(
1403 (j < nvertices) && (core_vertices[j] == curr_vertex),
1404 "ERROR(yac_read_icon_basic_grid_data_parallel): vertex id missmatch")
1405 edge_to_vertex[permutation[i]] = j;
1406 }
1407
1408 free(permutation);
1409 free(send_edge_vertices);
1410 free(recv_displ);
1411 free(send_displ);
1412 free(recv_count);
1413 free(send_count);
1414 free(local_edge_to_vertex);
1415 }
1416
1417 // generate cell ids for local partition
1418 yac_int * cell_ids = xmalloc(num_local_cells * sizeof(*cell_ids));
1419 for (size_t i = 0; i < num_local_cells; ++i)
1420 cell_ids[i] = (yac_int)(local_start_cell + i);
1421
1422 // generate num_vertices_per_cell
1423 int * num_vertices_per_cell =
1424 xmalloc(num_local_cells * sizeof(*num_vertices_per_cell));
1425 for (size_t i = 0; i < num_local_cells; ++i) num_vertices_per_cell[i] = nv;
1426
1427 // generate edge_type (for icon grids this is always YAC_GREAT_CIRCLE_EDGE)
1428 enum yac_edge_type * edge_type = xmalloc(num_core_edges * sizeof(*edge_type));
1429 for (size_t i = 0; i < num_core_edges; ++i) edge_type[i] = YAC_GREAT_CIRCLE_EDGE;
1430
1431 struct yac_basic_grid_data grid_data;
 1432 grid_data.vertex_coordinates = vertex_coordinates;
 1433 grid_data.cell_ids = cell_ids;
1434 grid_data.vertex_ids = core_vertices;
1435 grid_data.edge_ids = core_edges;
1436 grid_data.num_cells = num_local_cells;
1437 grid_data.num_vertices = num_core_vertices;
1438 grid_data.num_edges = num_core_edges;
1439 grid_data.core_cell_mask = generate_simple_core_mask(num_local_cells);
1440 grid_data.core_vertex_mask = generate_simple_core_mask(num_core_vertices);
1441 grid_data.core_edge_mask = generate_simple_core_mask(num_core_edges);
 1442 grid_data.num_vertices_per_cell = num_vertices_per_cell;
 1443 grid_data.num_cells_per_vertex = num_cells_per_vertex;
 1444 grid_data.cell_to_vertex = cell_to_vertex;
 1445 grid_data.cell_to_vertex_offsets = generate_offsets(num_local_cells, num_vertices_per_cell);
 1446 grid_data.cell_to_edge = cell_to_edge;
1447 grid_data.cell_to_edge_offsets = grid_data.cell_to_vertex_offsets;
1448 grid_data.vertex_to_cell = vertex_to_cell;
1449 grid_data.vertex_to_cell_offsets = generate_offsets(num_core_vertices, num_cells_per_vertex);
 1450 grid_data.edge_to_vertex = (yac_size_t_2_pointer)edge_to_vertex;
 1451 grid_data.edge_type = edge_type;
1452 grid_data.num_total_cells = num_local_cells;
1453 grid_data.num_total_vertices = num_core_vertices;
1454 grid_data.num_total_edges = num_core_edges;
1455
1456 return grid_data;
1457}
1458
1459 struct yac_basic_grid * yac_read_icon_basic_grid_parallel(
 1460 char const * filename, char const * gridname, MPI_Comm comm) {
1461
1462 return
 1463 yac_basic_grid_new(
 1464 gridname,
 1465 yac_read_icon_basic_grid_data_parallel(filename, comm));
1466}
1467
1468 struct yac_basic_grid * yac_read_icon_basic_grid_parallel_f2c(
 1469 char const * filename, char const * gridname, MPI_Fint comm) {
1470
1471 return
 1472 yac_read_icon_basic_grid_parallel(
 1473 filename, gridname, MPI_Comm_f2c(comm));
1474}
1475
1476 struct yac_basic_grid_data yac_read_icon_basic_grid_data(
 1477 char const * filename) {
1478
1479 int nbr_vertices;
1480 int nbr_cells;
1481 int * num_vertices_per_cell = NULL;
1482 int * cell_to_vertex = NULL;
1483 int * cell_mask = NULL;
1484
1485 double * x_vertices = NULL;
1486 double * y_vertices = NULL;
1487 double * x_cells = NULL;
1488 double * y_cells = NULL;
1489
1490 yac_read_icon_grid_information(filename, &nbr_vertices, &nbr_cells,
1492 &x_vertices, &y_vertices,
1493 &x_cells, &y_cells,
1494 &cell_mask);
1495
1496 free(x_cells);
1497 free(y_cells);
1498 free(cell_mask);
1499
1500 struct yac_basic_grid_data grid =
1502 (size_t)nbr_vertices, (int)nbr_cells, num_vertices_per_cell,
1503 x_vertices, y_vertices, cell_to_vertex);
1505 free(x_vertices);
1506 free(y_vertices);
1507 free(cell_to_vertex);
1508
1509 return grid;
1510}
1511
1512 struct yac_basic_grid * yac_read_icon_basic_grid(
 1513 char const * filename, char const * gridname) {
1514
1515 return
 1516 yac_basic_grid_new(
 1517 gridname, yac_read_icon_basic_grid_data(filename));
1518}
1519
1520/* ---------------------------------------------------------------- */
1521
1522static int * get_icon_cell_mask ( int ncid, size_t nbr_cells ) {
1523
1524 // get variable id
1525 int mask_id;
1526 yac_nc_inq_varid (ncid, "cell_sea_land_mask", &mask_id);
1527
 1528 // check number of dimensions (has to be 1)
1529 int ndims;
1530 YAC_HANDLE_ERROR(nc_inq_varndims(ncid, mask_id, &ndims));
1531 YAC_ASSERT(
1532 ndims == 1,
1533 "ERROR(get_icon_cell_mask): mask array has more than one dimension")
1534
1535 // get id of dimension
1536 int dimid;
1537 YAC_HANDLE_ERROR(nc_inq_vardimid(ncid, mask_id, &dimid));
1538
1539 // check size of mask (has to be equal to nbr_cells)
1540 size_t dimlen;
1541 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dimid, &dimlen));
1542 YAC_ASSERT(
1543 dimlen == nbr_cells,
1544 "ERROR(get_icon_cell_mask): invalid size of mask array")
1545
1546 // check mask type (has to be NC_INT)
1547 nc_type mask_type;
1548 YAC_HANDLE_ERROR(nc_inq_vartype(ncid, mask_id, &mask_type));
1549 YAC_ASSERT(
1550 mask_type == NC_INT, "ERROR(get_icon_cell_mask): invalid mask type")
1551
1552 // get and return mask
1553 int * cell_mask = xmalloc(nbr_cells * sizeof(*cell_mask));
1554 YAC_HANDLE_ERROR(nc_get_var_int (ncid, mask_id, cell_mask));
1555 return cell_mask;
1556}
1557
1558/* ---------------------------------------------------------------- */
1559
1560 static void get_icon_connect(
 1561 int ncid, size_t nbr_cells, int ** vertex_of_cell, size_t * nv) {
1562
1563 // get variable id
1564 int conn_id;
1565 yac_nc_inq_varid(ncid, "vertex_of_cell", &conn_id);
1566
 1567 // check number of dimensions (has to be 2)
1568 int ndims;
1569 YAC_HANDLE_ERROR(nc_inq_varndims(ncid, conn_id, &ndims));
1570 YAC_ASSERT(
1571 ndims == 2,
1572 "ERROR(get_icon_connect): "
1573 "connectivity array has invalid number of dimensions")
1574
1575 // get ids of dimensions
1576 int dimids[2];
1577 YAC_HANDLE_ERROR(nc_inq_vardimid(ncid, conn_id, dimids));
1578
1579 // check size of dimensions
1580 size_t dimlen;
1581 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dimids[0], nv));
1582 YAC_HANDLE_ERROR(nc_inq_dimlen(ncid, dimids[1], &dimlen));
1583 YAC_ASSERT(
1584 dimlen == nbr_cells,
1585 "ERROR(get_icon_connect): invalid size of second dimension of "
1586 "connectivity array (has to be nbr_cells)")
1587
1588 // get and return connectivity array
1589 *vertex_of_cell = xmalloc(*nv * nbr_cells * sizeof(**vertex_of_cell));
1590 YAC_HANDLE_ERROR(nc_get_var_int (ncid, conn_id, *vertex_of_cell));
1591}
1592
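/* Illustrative sketch (not part of read_icon_grid.c): converting the
 * connectivity returned by get_icon_connect, which is stored as an
 * nv x nbr_cells array, into per-cell lists of 0-based vertex indices.
 * The "- 1" assumes the usual 1-based (Fortran) indexing of ICON grid files. */

static void transpose_connectivity(
  int const * vertex_of_cell, size_t nv, size_t nbr_cells,
  int * cell_to_vertex) {

  for (size_t i = 0; i < nbr_cells; ++i)   // cell index
    for (size_t j = 0; j < nv; ++j)        // corner index within the cell
      cell_to_vertex[i * nv + j] = vertex_of_cell[j * nbr_cells + i] - 1;
}
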
1593void yac_delete_icon_grid_data( int ** cell_mask,
1594 int ** global_cell_id,
1595 int ** global_cell_id_rank,
1596 int ** num_vertices_per_cell,
1597 int ** global_corner_id,
1598 int ** global_corner_id_rank,
1599 int ** cell_to_vertex,
1600 double ** x_cells,
1601 double ** y_cells,
1602 double ** x_vertices,
1603 double ** y_vertices) {
1604
1605 free (*cell_mask);
1606 free (*global_cell_id);
1607 free (*global_cell_id_rank);
1608 free (*num_vertices_per_cell);
1609 free (*global_corner_id);
1610 free (*global_corner_id_rank);
1611 free (*cell_to_vertex);
1612 free (*x_cells);
1613 free (*y_cells);
1614 free (*x_vertices);
1615 free (*y_vertices);
1616}
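/* Illustrative sketch (not part of read_icon_grid.c): typical lifecycle of
 * the arrays produced by yac_read_part_icon_grid_information; they are all
 * released again through yac_delete_icon_grid_data. The file name is a
 * placeholder and error handling is omitted. */

#include <mpi.h>
#include "read_icon_grid.h"

void example_part_read(MPI_Comm comm) {

  int rank, size;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  int nbr_vertices, nbr_cells;
  int *num_vertices_per_cell, *cell_to_vertex, *global_cell_id, *cell_mask,
      *cell_core_mask, *global_corner_id, *corner_core_mask;
  double *x_vertices, *y_vertices, *x_cells, *y_cells;

  // each rank reads its part of the grid (core cells plus halo)
  yac_read_part_icon_grid_information(
    "icon_grid.nc", &nbr_vertices, &nbr_cells, &num_vertices_per_cell,
    &cell_to_vertex, &x_vertices, &y_vertices, &x_cells, &y_cells,
    &global_cell_id, &cell_mask, &cell_core_mask, &global_corner_id,
    &corner_core_mask, rank, size);

  // ... use the partitioned grid data ...

  yac_delete_icon_grid_data(
    &cell_mask, &global_cell_id, &cell_core_mask, &num_vertices_per_cell,
    &global_corner_id, &corner_core_mask, &cell_to_vertex,
    &x_cells, &y_cells, &x_vertices, &y_vertices);
}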
1617
1618#else
1619
1620struct yac_basic_grid_data yac_read_icon_basic_grid_data(
1621 char const * filename) {
1622
1623 UNUSED(filename);
1624 die(
1625 "ERROR(yac_read_icon_basic_grid_data): "
1626 "YAC is built without NetCDF support");
1627
1628 return
1629 yac_generate_basic_grid_data_reg_2d(
1630 (size_t[]){0,0}, (int[]){0,0}, NULL, NULL);
1631}
1632
1633struct yac_basic_grid * yac_read_icon_basic_grid(
1634 char const * filename, char const * gridname) {
1635
1636 UNUSED(filename);
1637 UNUSED(gridname);
1638 die(
1639 "ERROR(yac_read_icon_basic_grid): "
1640 "YAC is built without the NetCDF support");
1641
1642 return NULL;
1643}
1644
1645void yac_read_icon_grid_information(const char * filename, int * nbr_vertices,
1646 int * nbr_cells, int ** num_vertices_per_cell,
1647 int ** cell_to_vertex, double ** x_vertices,
1648 double ** y_vertices, double ** x_cells,
1649 double ** y_cells, int ** cell_mask) {
1650
1651 UNUSED(filename);
1652 UNUSED(nbr_vertices);
1653 UNUSED(nbr_cells);
1654 UNUSED(num_vertices_per_cell);
1655 UNUSED(cell_to_vertex);
1656 UNUSED(x_vertices);
1657 UNUSED(y_vertices);
1658 UNUSED(x_cells);
1659 UNUSED(y_cells);
1660 UNUSED(cell_mask);
1661 die(
1662 "ERROR(yac_read_icon_grid_information): "
1663 "YAC is built without the NetCDF support");
1664}
1665
1666void yac_read_part_icon_grid_information(
1667 const char * filename, int * num_vertices, int * num_cells,
1668 int ** num_vertices_per_cell, int ** cell_to_vertex,
1669 double ** x_vertices, double ** y_vertices,
1670 double ** x_cells, double ** y_cells, int ** global_cell_id,
1671 int ** cell_mask, int ** cell_core_mask,
1672 int ** global_corner_id, int ** corner_core_mask, int rank, int size) {
1673
1674 UNUSED(filename);
1675 UNUSED(num_vertices);
1676 UNUSED(num_cells);
1677 UNUSED(num_vertices_per_cell);
1678 UNUSED(cell_to_vertex);
1679 UNUSED(x_vertices);
1680 UNUSED(y_vertices);
1681 UNUSED(x_cells);
1682 UNUSED(y_cells);
1683 UNUSED(global_cell_id);
1684 UNUSED(cell_mask);
1685 UNUSED(cell_core_mask);
1686 UNUSED(global_corner_id);
1687 UNUSED(corner_core_mask);
1688 UNUSED(rank);
1689 UNUSED(size);
1690 die(
1691 "ERROR(yac_read_part_icon_grid_information): "
1692 "YAC is built without the NetCDF support");
1693}
1694
1695void yac_read_icon_grid_information_parallel(
1696 const char * filename, MPI_Comm comm, int * nbr_vertices, int * nbr_cells,
1697 int ** num_vertices_per_cell, int ** cell_to_vertex, int ** global_cell_ids,
1698 int ** cell_owner, int ** global_vertex_ids, int ** vertex_owner,
1699 double ** x_vertices, double ** y_vertices,
1700 double ** x_cells, double ** y_cells, int ** cell_msk) {
1701
1702 UNUSED(filename);
1703 UNUSED(comm);
1704 UNUSED(nbr_vertices);
1705 UNUSED(nbr_cells);
1706 UNUSED(num_vertices_per_cell);
1707 UNUSED(cell_to_vertex);
1708 UNUSED(global_cell_ids);
1709 UNUSED(cell_owner);
1710 UNUSED(global_vertex_ids);
1711 UNUSED(vertex_owner);
1712 UNUSED(x_vertices);
1713 UNUSED(y_vertices);
1714 UNUSED(x_cells);
1715 UNUSED(y_cells);
1716 UNUSED(cell_msk);
1717 die(
1718 "ERROR(yac_read_icon_grid_information_parallel): "
1719 "YAC is built without the NetCDF support");
1720}
1721
1722struct yac_basic_grid_data yac_read_icon_basic_grid_data_parallel(
1723 const char * filename, MPI_Comm comm) {
1724
1725 UNUSED(filename);
1726 UNUSED(comm);
1727 die(
1728 "ERROR(yac_read_icon_basic_grid_data_parallel): "
1729 "YAC is built without the NetCDF support");
1730
1731 return
1732 yac_generate_basic_grid_data_reg_2d(
1733 (size_t[]){0,0}, (int[]){0,0}, NULL, NULL);
1734}
1735
1736struct yac_basic_grid * yac_read_icon_basic_grid_parallel(
1737 char const * filename, char const * gridname, MPI_Comm comm) {
1738
1739 UNUSED(filename);
1740 UNUSED(gridname);
1741 UNUSED(comm);
1742 die(
1743 "ERROR(yac_read_icon_basic_grid_parallel): "
1744 "YAC is built without the NetCDF support");
1745
1746 return NULL;
1747}
1748
1749void yac_delete_icon_grid_data( int ** cell_mask,
1750 int ** global_cell_id,
1751 int ** cell_core_mask,
1752 int ** num_vertices_per_cell,
1753 int ** global_corner_id,
1754 int ** corner_core_mask,
1755 int ** cell_to_vertex,
1756 double ** x_cells,
1757 double ** y_cells,
1758 double ** x_vertices,
1759 double ** y_vertices) {
1760
1761 UNUSED(cell_mask);
1762 UNUSED(global_cell_id);
1763 UNUSED(cell_core_mask);
1764 UNUSED(num_vertices_per_cell);
1765 UNUSED(global_corner_id);
1766 UNUSED(corner_core_mask);
1767 UNUSED(cell_to_vertex);
1768 UNUSED(x_vertices);
1769 UNUSED(y_vertices);
1770 UNUSED(x_cells);
1771 UNUSED(y_cells);
1772 die(
1773 "ERROR(yac_delete_icon_grid_data): "
1774 "YAC is built without the NetCDF support");
1775}
1776
1777#endif // YAC_NETCDF_ENABLED