stardis-solver

Solve coupled heat transfers
git clone git://git.meso-star.fr/stardis-solver.git

sdis_c.h (7529B)


/* Copyright (C) 2016-2025 |Méso|Star> (contact@meso-star.com)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. */

#ifndef SDIS_C_H
#define SDIS_C_H

#include <star/ssp.h>
#include <rsys/hash.h>
#include <rsys/rsys.h>

/* IDs of the messages sent between processes */
enum mpi_sdis_message {
  MPI_SDIS_MSG_ACCUM_TEMP, /* Temperature accumulator */
  MPI_SDIS_MSG_ACCUM_TIME, /* Time accumulator */
  MPI_SDIS_MSG_ACCUM_FLUX_CONVECTIVE, /* Convective flux accumulator */
  MPI_SDIS_MSG_ACCUM_FLUX_IMPOSED, /* Imposed flux accumulator */
  MPI_SDIS_MSG_ACCUM_FLUX_RADIATIVE, /* Radiative flux accumulator */
  MPI_SDIS_MSG_ACCUM_FLUX_TOTAL, /* Total flux accumulator */
  MPI_SDIS_MSG_ACCUM_MEAN_POWER, /* Mean power accumulator */
  MPI_SDIS_MSG_GREEN_FUNCTION, /* Serialized green function */
  MPI_SDIS_MSG_PROGRESS, /* Progress status */
  MPI_SDIS_MSG_RES_T, /* Result status */
  MPI_SDIS_MSG_RNG_PROXY_SEQUENCE_ID, /* Index of the current RNG sequence */
  MPI_SDIS_MSG_TILE, /* 2D tile of row-ordered accumulators */
  MPI_SDIS_MSG_COUNT__
};

/* Forward declarations */
struct accum;
struct sdis_device;
struct sdis_estimator;
struct sdis_green_function;
struct sdis_scene;
struct ssp_rng;
struct ssp_rng_proxy;

extern LOCAL_SYM res_T
create_per_thread_rng
  (struct sdis_device* dev,
   struct ssp_rng* rng_state, /* May be NULL */
   const enum ssp_rng_type rng_type, /* RNG type when `rng_state' is NULL */
   struct ssp_rng_proxy** rng_proxy,
   struct ssp_rng** rngs[]);

extern LOCAL_SYM void
release_per_thread_rng
  (struct sdis_device* dev,
   struct ssp_rng* rngs[]);
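
/* A minimal usage sketch of the two helpers above. Illustrative only: the
 * variable names are hypothetical, and `rng_type' stands for whatever
 * enum ssp_rng_type value the caller selects:
 *
 *   struct ssp_rng_proxy* proxy = NULL;
 *   struct ssp_rng** rngs = NULL;
 *   res_T res = create_per_thread_rng(dev, NULL, rng_type, &proxy, &rngs);
 *   if(res != RES_OK) return res;
 *
 *   ...each worker thread `i' draws its samples from rngs[i]...
 *
 *   release_per_thread_rng(dev, rngs);
 */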

extern LOCAL_SYM res_T
create_per_thread_green_function
  (struct sdis_scene* scene,
   const hash256_T signature,
   struct sdis_green_function** greens[]);

extern LOCAL_SYM void
release_per_thread_green_function
  (struct sdis_scene* scn,
   struct sdis_green_function* greens[]);

/* Allocate the progress status list for the current process. Without MPI, the
 * list has length 1. With MPI, the length is also 1 except for the master
 * process, whose list length equals the number of MPI processes: the master
 * uses this list to gather the progress status of the other processes. */
extern LOCAL_SYM res_T
alloc_process_progress
  (struct sdis_device* dev,
   int32_t* progress[]);

extern LOCAL_SYM void
free_process_progress
  (struct sdis_device* dev,
   int32_t progress[]);
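
/* Typical lifetime of the progress list (sketch; names are illustrative). The
 * list is presumably updated as the local computation advances and is read by
 * the print_progress* functions declared below:
 *
 *   int32_t* progress = NULL;
 *   res_T res = alloc_process_progress(dev, &progress);
 *   if(res != RES_OK) return res;
 *   ...update progress[0] while the computation runs...
 *   free_process_progress(dev, progress);
 */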

/* Calculate the index range of the current process. It returns the size of the
 * range. The overall_count is the number of calculations to parallelize between
 * processes. For example, it may be the number of realisations of one
 * calculation, or the total number of probe calculations. */
extern LOCAL_SYM size_t
compute_process_index_range
  (const struct sdis_device* dev,
   const size_t overall_count,
   size_t range[2]); /* [lower, upper[ */
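
/* Sketch of how a process restricts itself to its own share of the work
 * (illustrative; `probes' and `solve_probe' are hypothetical caller-side
 * names):
 *
 *   size_t range[2];
 *   const size_t n = compute_process_index_range(dev, nprobes, range);
 *   size_t i;
 *   for(i = range[0]; i < range[1]; ++i)
 *     solve_probe(probes[i]);
 *   ...n is the local workload, i.e. range[1] - range[0]...
 */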

/* Return the number of realisations for the current process */
static INLINE size_t
compute_process_realisations_count
  (const struct sdis_device* dev,
   const size_t overall_realisations_count)
{
  size_t range[2];
  return compute_process_index_range(dev, overall_realisations_count, range);
}

/* Gather the accumulators and sum them into acc. With MPI, each non-master
 * process stores in acc the sum of its per-thread accumulators and sends it to
 * the master process. The master process gathers both the per-thread and the
 * per-process accumulators and saves the result in acc */
extern LOCAL_SYM res_T
gather_accumulators
  (struct sdis_device* dev,
   const enum mpi_sdis_message msg,
   const struct accum* per_thread_acc,
   struct accum* acc);
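
/* Sketch (illustrative; `per_thread_acc' is assumed to hold the accumulators
 * filled by the Monte Carlo worker threads of this process):
 *
 *   struct accum acc;
 *   res_T res = gather_accumulators
 *     (dev, MPI_SDIS_MSG_ACCUM_TEMP, per_thread_acc, &acc);
 *   if(res != RES_OK) return res;
 *   ...on the master process, acc now sums every thread of every process...
 */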

/* Collect accumulators evaluated over multiple processes, each accumulator
 * storing a complete Monte Carlo calculation. Without MPI, nothing happens
 * since the per_probe_acc variable already stores the entire list of
 * accumulators. With MPI, non-master processes send their list of accumulators
 * to the master process, which writes them into per_probe_acc after the
 * accumulators it manages itself, ordered by the identifiers of the probes
 * listed in process_probes. */
extern LOCAL_SYM res_T
gather_accumulators_list
  (struct sdis_device* dev,
   const enum mpi_sdis_message msg,
   const size_t nprobes, /* Total number of probes */
   const size_t process_probes[2], /* Ids of the probes managed by the process */
   struct accum* per_probe_acc); /* List of per probe accumulators */
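
/* Sketch tying this function to compute_process_index_range (illustrative;
 * `msg' is whichever mpi_sdis_message ID matches the accumulated quantity):
 *
 *   size_t process_probes[2];
 *   compute_process_index_range(dev, nprobes, process_probes);
 *   ...solve the probes of [process_probes[0], process_probes[1][ locally,
 *      filling one entry of per_probe_acc per local probe...
 *   res = gather_accumulators_list
 *     (dev, msg, nprobes, process_probes, per_probe_acc);
 */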

/* Gather the green functions. With MPI, non-master processes store in green
 * the gathering of their per-thread green functions and send the result to the
 * master process. The master process gathers both the per-thread green
 * functions and the per-process ones and finally saves the result in green */
extern LOCAL_SYM res_T
gather_green_functions
  (struct sdis_scene* scn,
   struct ssp_rng_proxy* proxy,
   struct sdis_green_function* per_thread_green[],
   const struct accum* acc_time,
   struct sdis_green_function** green);
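
/* Sketch (illustrative; `greens' comes from create_per_thread_green_function
 * above and `acc_time' points to the associated time accumulator):
 *
 *   struct sdis_green_function* green = NULL;
 *   res = gather_green_functions(scn, proxy, greens, acc_time, &green);
 *   if(res != RES_OK) return res;
 *   ...green now aggregates the per-thread and per-process green functions...
 */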

/* Gather the sequence IDs of the proxy RNGs. Without MPI, nothing happens.
 * With MPI, non-master processes send the sequence ID of their proxy RNG to
 * the master process. The master process updates its proxy RNG to ensure that
 * its state is greater than the state of all other proxies, that is, its
 * sequence ID is greater than the sequence IDs received. */
extern LOCAL_SYM res_T
gather_rng_proxy_sequence_id
  (struct sdis_device* dev,
   struct ssp_rng_proxy* proxy);

/* Gather `res' from all other processes. Without MPI, the function simply
 * returns `res'. With MPI, each process sends `res' to the other processes and
 * retrieves the `res' sent by the other processes. The function then returns
 * RES_OK if all the results collected are RES_OK. Otherwise, it returns the
 * first error received. */
extern LOCAL_SYM res_T
gather_res_T
  (struct sdis_device* dev,
   const res_T res);
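
/* Sketch of the usual error propagation pattern (illustrative): every process
 * reaches this point with its own local `res', and all of them take the same
 * branch afterwards since they all collect the same set of results:
 *
 *   res = local_computation();   ...a hypothetical local step...
 *   res = gather_res_T(dev, res);
 *   if(res != RES_OK) goto error;
 */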

/* Print the progress status. With MPI, the master process prints the progress
 * of all processes stored in the progress list. Non-master processes do not
 * print anything */
extern LOCAL_SYM void
print_progress
  (struct sdis_device* dev,
   int32_t progress[],
   const char* label); /* Text preceding the progress status */

/* Update the printed progress status, i.e. rewind the printing and print the
 * new status */
extern LOCAL_SYM void
print_progress_update
  (struct sdis_device* dev,
   int32_t progress[],
   const char* label); /* Text preceding the progress status */

/* Print progress completion, i.e. rewind the printing and print 100% */
extern LOCAL_SYM void
print_progress_completion
  (struct sdis_device* dev,
   int32_t progress[],
   const char* label); /* Text preceding the progress status */
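
/* Sketch of the expected call sequence (illustrative):
 *
 *   print_progress(dev, progress, "Solving: ");
 *   while(!done) {
 *     ...advance the computation and update the progress list...
 *     print_progress_update(dev, progress, "Solving: ");
 *   }
 *   print_progress_completion(dev, progress, "Solving: ");
 */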

/* Wait for all processes. Without MPI this function does nothing. With MPI
 * it waits for MPI process synchronisation */
extern LOCAL_SYM void
process_barrier
  (struct sdis_device* dev);

#endif /* SDIS_C_H */