// ***********************************************************************************
// Idefix MHD astrophysical code
// Copyright(C) Geoffroy R. J. Lesur <[email protected]>
// and other code contributors
// Licensed under CeCILL 2.1 License, see COPYING for more information
// ***********************************************************************************

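// Column computes the cumulative column density of a quantity along one grid
// direction: an inclusive prefix sum of q*dl over the active cells, performed
// with Kokkos team scans inside each subdomain and stitched across MPI
// subdomains by relaying partial sums along the domain decomposition. A
// positive sign integrates from the beginning of the domain, a negative sign
// from the end.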
#include <vector>
#include <string>

#include "column.hpp"
#include "idefix.hpp"
#include "input.hpp"
#include "output.hpp"
#include "grid.hpp"
#include "dataBlock.hpp"
#include "dataBlockHost.hpp"

Column::Column(int dir, int sign, DataBlock *data)
  : direction(dir), sign(sign) {
  idfx::pushRegion("Column::Column");
  this->np_tot = data->np_tot;
  this->np_int = data->np_int;
  this->beg = data->beg;

  if(dir >= DIMENSIONS || dir < IDIR) IDEFIX_ERROR("Unknown direction for Column constructor");

  // Allocate the array that stores the computed column (cumulative sum)
  this->ColumnArray = IdefixArray3D<real>("ColumnArray",np_tot[KDIR], np_tot[JDIR], np_tot[IDIR]);

  // Elementary volumes and face areas
  this->Volume = data->dV;
  this->Area = data->A[dir];

  // Allocate helper array
  if(dir == IDIR) {
    localSum = IdefixArray2D<real>("localSum",np_tot[KDIR], np_tot[JDIR]);
  }
  if(dir == JDIR) {
    localSum = IdefixArray2D<real>("localSum",np_tot[KDIR], np_tot[IDIR]);
  }
  if(dir == KDIR) {
    localSum = IdefixArray2D<real>("localSum",np_tot[JDIR], np_tot[IDIR]);
  }
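  // localSum spans the two directions perpendicular to dir: it carries, for
  // each column, the running sum exchanged between MPI ranks and, when sign<0,
  // the total column from which the local cumulative sum is subtracted.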
  #ifdef WITH_MPI
    // Create sub-MPI communicator dedicated to the scan
    int remainDims[3] = {false, false, false};
    remainDims[dir] = true;
    MPI_Cart_sub(data->mygrid->CartComm, remainDims, &this->ColumnComm);
    MPI_Comm_rank(this->ColumnComm, &this->MPIrank);
    MPI_Comm_size(this->ColumnComm, &this->MPIsize);

    // Create MPI instance handling boundary exchanges
    int ntarget = 0;
    std::vector<int> mapVars;
    mapVars.push_back(ntarget);

    this->mpi.Init(data->mygrid, mapVars, data->nghost.data(), data->np_int.data());
    this->nproc = data->mygrid->nproc;
  #endif
  idfx::popRegion();
}

void Column::ComputeColumn(IdefixArray4D<real> in, const int var) {
  idfx::pushRegion("Column::ComputeColumn");
  const int nk = np_int[KDIR];
  const int nj = np_int[JDIR];
  const int ni = np_int[IDIR];
  const int kb = beg[KDIR];
  const int jb = beg[JDIR];
  const int ib = beg[IDIR];
  const int ke = kb+nk;
  const int je = jb+nj;
  const int ie = ib+ni;

  const int direction = this->direction;
  auto column = this->ColumnArray;
  auto dV = this->Volume;
  auto A = this->Area;
  auto localSum = this->localSum;

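  // In each cell, dV divided by the arithmetic mean of the two face areas
  // along the scan direction approximates the cell's linear extent dl in that
  // direction, so the inclusive scan below accumulates the discrete integral
  // of in*dl, i.e. the column of `in` from the domain edge up to each cell.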
  if(direction==IDIR) {
    // Inspired by loop.hpp
    Kokkos::parallel_for("ColumnX1", team_policy(nk*nj, Kokkos::AUTO),
      KOKKOS_LAMBDA (member_type team_member) {
        int k = team_member.league_rank() / nj;
        int j = team_member.league_rank() - k*nj + jb;
        k += kb;
        Kokkos::parallel_scan(Kokkos::TeamThreadRange(team_member,ib,ie),
          [=] (int i, real &partial_sum, bool is_final) {
            partial_sum += in(var,k,j,i)*dV(k,j,i) / (0.5*(A(k,j,i)+A(k,j,i+1)));
            if(is_final) column(k,j,i) = partial_sum;
          });
      });
  }
  if(direction==JDIR) {
    // Inspired by loop.hpp
    Kokkos::parallel_for("ColumnX2", team_policy(nk*ni, Kokkos::AUTO),
      KOKKOS_LAMBDA (member_type team_member) {
        int k = team_member.league_rank() / ni;
        int i = team_member.league_rank() - k*ni + ib;
        k += kb;
        Kokkos::parallel_scan(Kokkos::TeamThreadRange(team_member,jb,je),
          [=] (int j, real &partial_sum, bool is_final) {
            partial_sum += in(var,k,j,i)*dV(k,j,i) / (0.5*(A(k,j,i)+A(k,j+1,i)));
            if(is_final) column(k,j,i) = partial_sum;
          });
      });
  }
  if(direction==KDIR) {
    // Inspired by loop.hpp
    Kokkos::parallel_for("ColumnX3", team_policy(nj*ni, Kokkos::AUTO),
      KOKKOS_LAMBDA (member_type team_member) {
        int j = team_member.league_rank() / ni;
        int i = team_member.league_rank() - j*ni + ib;
        j += jb;
        Kokkos::parallel_scan(Kokkos::TeamThreadRange(team_member,kb,ke),
          [=] (int k, real &partial_sum, bool is_final) {
            partial_sum += in(var,k,j,i)*dV(k,j,i) / (0.5*(A(k,j,i)+A(k+1,j,i)));
            if(is_final) column(k,j,i) = partial_sum;
          });
      });
  }

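  // The scan above is local to each MPI subdomain. To obtain the global
  // cumulative sum, the ranks along the scan direction form a pipeline: each
  // rank (except the first) receives the running sum accumulated upstream and
  // adds it to its local scan, then each rank (except the last) forwards the
  // last active plane of its updated column to its downstream neighbour.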
  #ifdef WITH_MPI
    // Fetch the running sum from the upstream neighbour
    int dst, src;
    MPI_Cart_shift(this->ColumnComm, 0, 1, &src, &dst);
    int size = localSum.extent(0)*localSum.extent(1);
    if(MPIrank>0) {
      MPI_Status status;
      // Get the cumulative sum from previous processes
      Kokkos::fence();
      MPI_Recv(localSum.data(), size, realMPI, src, 20, ColumnComm, &status);
      // Add this to our cumulative sum
      idefix_for("Addsum",kb,ke,jb,je,ib,ie,
        KOKKOS_LAMBDA(int k, int j, int i) {
          if(direction == IDIR) column(k,j,i) += localSum(k,j);
          if(direction == JDIR) column(k,j,i) += localSum(k,i);
          if(direction == KDIR) column(k,j,i) += localSum(j,i);
        });
    } // MPIrank > 0
    // Pass the updated running sum downstream
    if(MPIrank < MPIsize-1) {
      if(direction==IDIR) {
        idefix_for("Loadsum",kb,ke,jb,je,
          KOKKOS_LAMBDA(int k, int j) {
            localSum(k,j) = column(k,j,ie-1);
          });
      }
      if(direction==JDIR) {
        idefix_for("Loadsum",kb,ke,ib,ie,
          KOKKOS_LAMBDA(int k, int i) {
            localSum(k,i) = column(k,je-1,i);
          });
      }
      if(direction==KDIR) {
        idefix_for("Loadsum",jb,je,ib,ie,
          KOKKOS_LAMBDA(int j, int i) {
            localSum(j,i) = column(ke-1,j,i);
          });
      }
      // And send it
      Kokkos::fence();
      MPI_Send(localSum.data(), size, realMPI, dst, 20, ColumnComm);
    } // MPIrank < MPIsize-1
  #endif
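  // When sign<0 the column is measured from the opposite end of the domain:
  // localSum is loaded with the forward column through the last active cell,
  // augmented by that cell's own contribution so that the reversed column does
  // not vanish in the outermost cell. Every rank then subtracts its local
  // cumulative sum from this total.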
  if(sign<0) {
    // Compute the total cumulative sum; with MPI, only the last subdomain's
    // value survives the broadcast below
    if(direction == IDIR) {
      idefix_for("Loadsum",kb,ke,jb,je,
        KOKKOS_LAMBDA(int k, int j) {
          localSum(k,j) = column(k,j,ie-1) + in(var,k,j,ie-1) * dV(k,j,ie-1)
                                            / (0.5*(A(k,j,ie-1)+A(k,j,ie)));
        });
    }
    if(direction == JDIR) {
      idefix_for("Loadsum",kb,ke,ib,ie,
        KOKKOS_LAMBDA(int k, int i) {
          localSum(k,i) = column(k,je-1,i) + in(var,k,je-1,i) * dV(k,je-1,i)
                                            / (0.5*(A(k,je-1,i)+A(k,je,i)));
        });
    }
    if(direction == KDIR) {
      idefix_for("Loadsum",jb,je,ib,ie,
        KOKKOS_LAMBDA(int j, int i) {
          localSum(j,i) = column(ke-1,j,i) + in(var,ke-1,j,i) * dV(ke-1,j,i)
                                            / (0.5*(A(ke-1,j,i)+A(ke,j,i)));
        });
    }

    #ifdef WITH_MPI
      Kokkos::fence();
      // Broadcast the total computed in the last subdomain along the direction
      MPI_Bcast(localSum.data(), size, realMPI, MPIsize-1, ColumnComm);
    #endif
    // Every process subtracts its local cumulative column from the full column
    idefix_for("Subsum",kb,ke,jb,je,ib,ie,
      KOKKOS_LAMBDA(int k, int j, int i) {
        if(direction==IDIR) column(k,j,i) = localSum(k,j)-column(k,j,i);
        if(direction==JDIR) column(k,j,i) = localSum(k,i)-column(k,j,i);
        if(direction==KDIR) column(k,j,i) = localSum(j,i)-column(k,j,i);
      });
  } // sign < 0
  // Exchange boundary elements when using MPI so that the column density
  // in the ghost zones is consistent across subdomains
  #ifdef WITH_MPI
    // Wrap the column data in a 4D view, as expected by the exchange routines
    IdefixArray4D<real> arr4D(column.data(), 1, this->np_tot[KDIR],
                                                this->np_tot[JDIR],
                                                this->np_tot[IDIR]);

    for(int dir = 0 ; dir < DIMENSIONS ; dir++) {
      // Exchange data only along directions that are actually decomposed
      if(nproc[dir]>1) {
        switch(dir) {
          case 0:
            this->mpi.ExchangeX1(arr4D);
            break;
          case 1:
            this->mpi.ExchangeX2(arr4D);
            break;
          case 2:
            this->mpi.ExchangeX3(arr4D);
            break;
        }
      }
    }
  #endif
  idfx::popRegion();
}

void Column::ComputeColumn(IdefixArray3D<real> in) {
  // Wrap the 3D input in a 4D alias and compute the column of its single variable
  IdefixArray4D<real> arr4D(in.data(), 1, in.extent(0), in.extent(1), in.extent(2));
  this->ComputeColumn(arr4D, 0);
}
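
// Usage sketch (hypothetical caller code; names such as data.hydro->Vc and RHO
// are assumptions about the surrounding Idefix setup, and direct access to
// ColumnArray depends on its visibility in column.hpp):
//
//   Column colDensity(KDIR, -1, &data);             // column measured downwards from the top
//   colDensity.ComputeColumn(data.hydro->Vc, RHO);  // integrate density along X3
//   IdefixArray3D<real> cd = colDensity.ColumnArray;  // per-cell column density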