1!--------------------------------------------------------------------------------------------------!
2!   CP2K: A general program to perform molecular dynamics simulations                              !
3!   Copyright (C) 2000 - 2020  CP2K developers group                                               !
4!--------------------------------------------------------------------------------------------------!
5
6! **************************************************************************************************
7!> \brief A wrapper around pw_to_cube() which accepts particle_list_type
8!> \author Ole Schuett
9! **************************************************************************************************
10MODULE cp_realspace_grid_cube
11   USE atomic_kind_types,               ONLY: get_atomic_kind
12   USE cp_output_handling,              ONLY: cp_mpi_io_get
13   USE kinds,                           ONLY: dp
14   USE particle_list_types,             ONLY: particle_list_type
15   USE pw_types,                        ONLY: pw_type
16   USE realspace_grid_cube,             ONLY: cube_to_pw,&
17                                              pw_to_cube,&
18                                              pw_to_simple_volumetric
19#include "./base/base_uses.f90"
20
21   IMPLICIT NONE
22
23   PRIVATE
24
25   PUBLIC :: cp_pw_to_cube, cp_pw_to_simple_volumetric, cp_cube_to_pw
26
27   CHARACTER(len=*), PARAMETER, PRIVATE :: moduleN = 'cp_realspace_grid_cube'
28
29CONTAINS
30
31! **************************************************************************************************
!> \brief Writes a pw grid as a Gaussian cube file; accepts a particle_list_type,
!>        which is flattened into the plain z/r arrays expected by pw_to_cube()
!> \param pw the grid to be written
!> \param unit_nr unit of the (already opened) cube file
!> \param title title line of the cube file, forwarded to pw_to_cube()
!> \param particles list of particles whose atomic numbers and positions are
!>        written into the cube header; may be unassociated to omit the atom list
!> \param stride forwarded to pw_to_cube() (print every stride-th grid point)
!> \param zero_tails forwarded to pw_to_cube()
39!> \param mpi_io True if cube should be written in parallel using MPI
40! **************************************************************************************************
41   SUBROUTINE cp_pw_to_cube(pw, unit_nr, title, particles, stride, zero_tails, mpi_io)
42      TYPE(pw_type), POINTER                             :: pw
43      INTEGER, INTENT(IN)                                :: unit_nr
44      CHARACTER(*), INTENT(IN), OPTIONAL                 :: title
45      TYPE(particle_list_type), POINTER                  :: particles
46      INTEGER, DIMENSION(:), OPTIONAL, POINTER           :: stride
47      LOGICAL, INTENT(IN), OPTIONAL                      :: zero_tails, mpi_io
48
49      CHARACTER(len=*), PARAMETER :: routineN = 'cp_pw_to_cube', routineP = moduleN//':'//routineN
50
51      INTEGER                                            :: i, n
52      INTEGER, ALLOCATABLE, DIMENSION(:)                 :: particles_z
53      REAL(KIND=dp), ALLOCATABLE, DIMENSION(:, :)        :: particles_r
54      TYPE(particle_list_type), POINTER                  :: my_particles
55
56      NULLIFY (my_particles)
57      my_particles => particles
58      IF (ASSOCIATED(my_particles)) THEN
59         n = my_particles%n_els
60         ALLOCATE (particles_z(n))
61         ALLOCATE (particles_r(3, n))
62         DO i = 1, n
63            CALL get_atomic_kind(my_particles%els(i)%atomic_kind, z=particles_z(i))
64            particles_r(:, i) = my_particles%els(i)%r(:)
65         END DO
66
67         CALL pw_to_cube(pw=pw, unit_nr=unit_nr, title=title, &
68                         particles_z=particles_z, particles_r=particles_r, &
69                         stride=stride, zero_tails=zero_tails, &
70                         mpi_io=mpi_io)
71      ELSE
72         CALL pw_to_cube(pw=pw, unit_nr=unit_nr, title=title, &
73                         stride=stride, zero_tails=zero_tails, &
74                         mpi_io=mpi_io)
75      END IF
76
77   END SUBROUTINE cp_pw_to_cube
78
79! **************************************************************************************************
80!> \brief Prints grid in a simple format: X Y Z value
!> \param pw grid to be printed
!> \param unit_nr unit of the (already opened) output file
!> \param stride forwarded to pw_to_simple_volumetric() (print every stride-th point)
!> \param pw2 optional second grid, forwarded to pw_to_simple_volumetric() when present
85!> \par History
86!>      Created [Vladimir Rybkin] (08.2017)
87! **************************************************************************************************
88   SUBROUTINE cp_pw_to_simple_volumetric(pw, unit_nr, stride, pw2)
89      TYPE(pw_type), POINTER                             :: pw
90      INTEGER, INTENT(IN)                                :: unit_nr
91      INTEGER, DIMENSION(:), OPTIONAL, POINTER           :: stride
92      TYPE(pw_type), OPTIONAL, POINTER                   :: pw2
93
94      CHARACTER(len=*), PARAMETER :: routineN = 'cp_pw_to_simple_volumetric', &
95         routineP = moduleN//':'//routineN
96
97      IF (.NOT. PRESENT(pw2)) THEN
98         CALL pw_to_simple_volumetric(pw, unit_nr, stride)
99      ELSE
100         CALL pw_to_simple_volumetric(pw, unit_nr, stride, pw2)
101      ENDIF
102
103   END SUBROUTINE cp_pw_to_simple_volumetric
104
105! **************************************************************************************************
106!> \brief Thin wrapper around routine cube_to_pw
107!> \param grid     pw to read from cube file
108!> \param filename name of cube file
109!> \param scaling  scale values before storing
110!> \par History
111!>      Created [Nico Holmberg] (09.2018)
112! **************************************************************************************************
113   SUBROUTINE cp_cube_to_pw(grid, filename, scaling)
114      TYPE(pw_type), POINTER                             :: grid
115      CHARACTER(len=*), INTENT(in)                       :: filename
116      REAL(kind=dp), INTENT(in)                          :: scaling
117
118      CHARACTER(len=*), PARAMETER :: routineN = 'cp_cube_to_pw', routineP = moduleN//':'//routineN
119
120      LOGICAL                                            :: parallel_read
121
122      ! Determine whether to use MPI I/O for reading cube filename
123      parallel_read = .TRUE.
124      ! Parallel routine falls back to stream read in serial mode,
125      ! but it has slight overhead compared to sequential read
126      ! Therefore, we use sequential version in serial mode
127      IF (grid%pw_grid%para%group_size == 1) parallel_read = .FALSE.
128      ! Check if MPI I/O was disabled in GLOBAL section
129      IF (.NOT. cp_mpi_io_get()) parallel_read = .FALSE.
130
131      CALL cube_to_pw(grid, filename, scaling, parallel_read)
132
133   END SUBROUTINE cp_cube_to_pw
134
135END MODULE cp_realspace_grid_cube
136