# -*- coding: utf-8 -*-

"""
This module adjusts some parameters according to the type of execution.

For the aster5 cluster.

These asrun customizations are enabled through the asrun configuration file:

    schema_calcul : plugins.aster5.modifier

    schema_execute : plugins.aster5.change_command_line
"""

import os

from asrun.core import magic
from asrun.runner import Runner
from asrun.build import AsterBuild
from asrun.config import build_config_of_version
from asrun.common_func import get_tmpname
from asrun.plugins.generic_func import getCpuParameters, setDistrLimits


# memory (MB) added to memjob for testcases
MEMSUP = 0

def modifier(calcul):
    """Call elementary functions to adjust:
        - batch parameters,
        - TODO submit interactive MPI executions in interactive queues.
    Argument: ASTER_CALCUL object.
    Return value: ASTER_PROFIL object."""
    serv = calcul.serv
    prof = calcul.prof
    if prof['mode'][0] == 'batch':
        prof = change_batch_parameters(serv, prof)
    setDistrLimits(prof, 512, 200 * 3600 - 1, 'batch')
    return prof
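
# For a batch study, modifier() only rewrites the batch parameters through
# change_batch_parameters(). The setDistrLimits() call (see
# asrun.plugins.generic_func) then bounds distributed runs -- presumably to
# at most 512 concurrent jobs and a time limit just under 200 hours
# (200 * 3600 - 1 seconds) in batch mode.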

def getCpuParametersLocal(prof):
    """Force the use of all available threads for OpenMP AND BLAS.
    See the `asrun.plugins.generic_func.getCpuParameters` function."""
    # Fix the number of physical processors (2) & cores per processor (12).
    cpu_mpi, node_mpi, cpu_openmp, blas_thread = getCpuParameters(2, 12, prof)
    # fold the BLAS thread count into the OpenMP thread count
    cpu_openmp = cpu_openmp * blas_thread
    return cpu_mpi, node_mpi, cpu_openmp, blas_thread
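
# Worked example (hypothetical export values): on a 2 x 12-core aster5 node,
# a job asking for 4 MPI processes on 1 node leaves 24 / 4 = 6 cores per
# process. getCpuParameters() splits those cores between OpenMP and BLAS
# threads; multiplying the two counts above hands all of them to OpenMP
# (e.g. cpu_openmp=3, blas_thread=2 -> cpu_openmp=6).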

def change_batch_parameters(serv, prof):
    """Change the batch parameters in an export object ('classe', memory,
    time, number of nodes...)."""
    cpu_mpi, node_mpi, cpu_openmp, blas_thread = getCpuParametersLocal(prof)
    cpu_per_node = 1. * cpu_mpi / node_mpi

    # change the job queue if:
    #  - it's a study and the batch queue is not defined,
    #  - or it's a testcase.
    DEFAULT_QUEUE = 'cn64'
    g0 = group = prof['classe'][0]
    if group == '':
        # default queue
        group = DEFAULT_QUEUE
    batch_custom = "--wckey=P11YB:ASTER"

    # add MEMSUP MB
    if 'distribution' not in prof['actions']:
        prof['memjob'] = int(float(prof['memjob'][0])) + MEMSUP * 1024
    if 'astout' in prof['actions']:
        prof['memjob'] = 1000 * 1024
        prof['tpsjob'] = 60 * 24
    if cpu_mpi > 1:
        # ncpu=2 on a single node should be allowed, but it does not work
        batch_custom += ' --exclusive'
        # --nodes is now required, even if it is equal to 1
        if not prof['mpi_nbnoeud'][0]:
            prof['mpi_nbnoeud'] = 1
    else:
        # --nodes must not be set for sequential executions
        if prof['mpi_nbnoeud'][0]:
            prof['mpi_nbnoeud'] = ""

    # memjob is in KB, memory_limit in MB
    memory_limit = float(prof['memjob'][0]) / 1024.
    prof.set_param_memory(memory_limit)
    prof['memoryNode'] = memory_limit * cpu_per_node
    # memory per node in GB
    memGB = memory_limit * cpu_per_node / 1024.
    if memGB > 256:
        group = 'cn512'
    elif memGB > 64:
        group = 'cn256'
    else:
        group = 'cn64'
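
    # e.g. (hypothetical figures): 4 MPI processes on one node with a 20 GB
    # memory limit each need 80 GB on the node -> 'cn256'; the same job
    # spread over 2 nodes needs only 40 GB per node -> it stays on 'cn64'.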

    # time limit in hours
    tpsjob = float(prof['tpsjob'][0]) * 60. / 3600.
    if tpsjob > 200 and group not in ('cn64', 'urgent'):
        group = 'cn64'

    # special hook for performance testcases using only 1 cpu
    if cpu_mpi == 1 and 'performance' in prof['testlist'][0] and memGB < 64:
        batch_custom += ' --exclusive'

    # allocate all the available cores if not given in the export
    if not prof['ncpus'][0]:
        magic.run.DBG("Change number of threads: %s" % cpu_openmp)
        # capped at 6 instead of all available cores (cpu_openmp)
        prof['ncpus'] = min([6, cpu_openmp])
    else:
        prof['ncpus'] = int(prof['ncpus'][0])

    # general - see https://computing.llnl.gov/linux/slurm/cpu_management.html
    batch_custom += (' --cpus-per-task={0} --threads-per-core=1 '
        '--distribution=block:block --mem_bind=local').format(prof['ncpus'][0])
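
    # At this point batch_custom reads, for instance (hypothetical
    # multi-process job with ncpus=6):
    #   --wckey=P11YB:ASTER --exclusive --cpus-per-task=6 --threads-per-core=1
    #   --distribution=block:block --mem_bind=local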

    if g0 == 'urgent':
        batch_custom += ' --qos=urgent'

    prof['batch_custom'] = batch_custom
    if group != g0:
        prof['classe'] = group
        magic.run.DBG("Change batch queue group to: %s" % group)
    return prof


def change_command_line(prof):
    """Change the mpirun command line and arguments.
    Argument: ASTER_PROFIL object.
    Return value: class derived from Runner.
    """
    cpu_mpi, node_mpi, cpu_openmp, blas_thread = getCpuParametersLocal(prof)
    # for compatibility with versions < 13.1
    use_numthreads = False
    vers = prof['version'][0]
    if vers:
        conf = build_config_of_version(magic.run, vers, error=False)
        if conf:
            build = AsterBuild(magic.run, conf)
            use_numthreads = build.support('use_numthreads')
    if not use_numthreads:
        cpu_openmp = prof['ncpus'][0]
    # end of compatibility block

    class ModifiedRunner(Runner):
        """Modified Runner that exports some variables before execution."""

        def get_exec_command(self, cmd_in, add_tee=False, env=None):
            """Return the command to run Code_Aster.
            Exports specific variables for Intel MKL."""
            cmd = Runner.get_exec_command(self, cmd_in, add_tee, env)
            # prepend the threading environment to the command line
            cmd = (
                "export OMP_NUM_THREADS={openmp} ; "
                "export MKL_NUM_THREADS={blas} ; "
                "export I_MPI_PIN_DOMAIN=omp:compact ; "
            ).format(openmp=cpu_openmp, blas=blas_thread) + cmd
            return cmd

    return ModifiedRunner
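
# Sketch of how asrun is expected to consume this hook (hypothetical driver
# code; the real call site lives inside asrun itself):
#
#     runner_class = change_command_line(prof)
#     runner = runner_class(...)  # instantiated by asrun with its usual args
#     cmd = runner.get_exec_command("mpirun -n 4 ./aster ...")
#     # -> roughly: "export OMP_NUM_THREADS=6 ; export MKL_NUM_THREADS=1 ;
#     #     export I_MPI_PIN_DOMAIN=omp:compact ; mpirun -n 4 ./aster ..."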