#include "Utils/clusterspecifier.h"

int Parallel::m_numProcs;
int Parallel::m_activeProcs;
bool Parallel::m_isActive;
MPI_Comm Parallel::m_cartCoords;
MPI_Comm Parallel::m_cartCoordComm;
std::array<int, 4> Parallel::m_subBlocks;
std::array<int, 4> Parallel::m_rankCoord;
std::array<int, 4> Parallel::m_latticeSubSize;
std::array<int, 4> Parallel::m_latticeFullSize;
std::array<int, 4> Parallel::m_parity;
std::array<std::array<int, 2>, 4> Parallel::m_neighbor;
std::array<std::array<std::array<std::array<int, 2>, 4>, 2>, 4> Parallel::m_secondNeighbor;
// Parallel::initialize(): start MPI and store the rank and the total number of processors.
MPI_Init(&argn, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
MPI_Comm_size(MPI_COMM_WORLD, &m_numProcs);
// Parallel::createGeometry(): store the full lattice and sublattice dimensions.
void Parallel::createGeometry(std::array<int, 4> latticeSize,
                              std::array<int, 4> subLatticeSize){
  m_latticeSubSize = subLatticeSize;
  m_latticeFullSize = latticeSize;

  MPI_Barrier(MPI_COMM_WORLD);
  // Number of sub-blocks per direction and the total number of ranks required.
  int subBlocksProd = 1;
  for(int i = 0; i < 4; i++){
    m_subBlocks[i] = m_latticeFullSize[i] / m_latticeSubSize[i];
    subBlocksProd *= m_subBlocks[i];
  }

  // Abort if the decomposition needs more ranks than were launched.
  if(subBlocksProd > m_numProcs && m_rank == 0){
    printf("ERROR: Too few processors given for specified lattice and sublattice sizes\n");
    MPI_Abort(MPI_COMM_WORLD, 0);
  }
  m_activeProcs = subBlocksProd;
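
  // Worked example (assumed sizes, not from this listing): a 16^3 x 32 lattice
  // split into 8^3 x 16 sublattices gives m_subBlocks = {2, 2, 2, 2}, so
  // subBlocksProd = 16 and exactly 16 ranks are needed and marked active.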

  // Periodic boundary conditions in all four directions ('reorder' is declared on an elided line).
  std::array<int, 4> period = {1, 1, 1, 1};
  MPI_Cart_create(MPI_COMM_WORLD, 4, m_subBlocks.data(), period.data(), reorder,
                  &m_cartCoords);

  // Ranks that fit inside the decomposition get Cartesian coordinates.
  if(m_rank + 1 <= m_activeProcs){
    MPI_Cart_coords(m_cartCoords, m_rank, 4, m_rankCoord.data());

    for(int i = 0; i < 4; i++)
      // ...

    for(int i = 0; i < 4; i++)
      for(int j = 0; j < 4; j++)
        // ...
  }

  // Split off a communicator containing only the active ranks.
  int active = m_isActive ? 1 : 0;
  MPI_Comm_split(MPI_COMM_WORLD, active, 1, &m_cartCoordComm);

// Parallel::openFile(): open a file collectively on the active communicator.
MPI_File_open(m_cartCoordComm, fileName, MPI_MODE_CREATE | MPI_MODE_RDWR,
              MPI_INFO_NULL, &file);

// Parallel::closeFile(): close the MPI file handle.
MPI_File_close(&file);

// Parallel::assignNeighbor(): ranks one step backwards ([0]) and forwards ([1]) along direction.
MPI_Cart_shift(m_cartCoords, direction, 1,
               &m_neighbor[direction][0],
               &m_neighbor[direction][1]);

// Parallel::assignSecondNeighbor(): fill the four (dir1, dir2) entries of m_secondNeighbor.
// Only the output arguments of two calls (presumably MyMPI_Cart_shift2) appear in this
// listing; the call heads are elided.
                 m_secondNeighbor[dir1][0][dir2][0],
                 m_secondNeighbor[dir1][1][dir2][1]);

                 m_secondNeighbor[dir1][1][dir2][0],
                 m_secondNeighbor[dir1][0][dir2][1]);

// Parallel::MyMPI_Cart_shift2(): combined shift along two directions; writes the rank
// shifted forwards by (shift1, shift2) to dest and the rank shifted backwards to source.
void Parallel::MyMPI_Cart_shift2(MPI_Comm comm, int dir1, int shift1,
                                 int dir2, int shift2,
                                 int& source, int& dest){
  std::array<int, 4> coords;
  for(int i = 0; i < 4; i++) coords[i] = m_rankCoord[i];
  coords[dir1] += shift1;
  coords[dir2] += shift2;
  MPI_Cart_rank(comm, coords.data(), &dest);
  for(int i = 0; i < 4; i++) coords[i] = m_rankCoord[i];
  coords[dir1] -= shift1;
  coords[dir2] -= shift2;
  MPI_Cart_rank(comm, coords.data(), &source);
}
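
// Example call (hypothetical values): MyMPI_Cart_shift2(m_cartCoords, 0, 1, 1, 1, source, dest)
// writes to dest the rank displaced by (+1, +1, 0, 0) from this rank's coordinates and to
// source the rank displaced by (-1, -1, 0, 0), with periodic wrap-around at the boundaries.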

// Parallel::getNeighbor(): rank of the neighbor along the given direction and sign.
int Parallel::getNeighbor(int direction, int sign){
  return m_neighbor[direction][sign];
}

// Parallel::getSecondNeighbor(): rank of the neighbor along the two given directions and signs.
int Parallel::getSecondNeighbor(int direction1, int sign1,
                                int direction2, int sign2){
  return m_secondNeighbor[direction1][sign1][direction2][sign2];
}

Utilities for parallelization. A short usage sketch follows the member list below.

static void initialize()
Initializes MPI and gets the rank number and the total number of processors.

static void createGeometry(std::array< int, 4 > latticeSize, std::array< int, 4 > subLatticeSize)
Creates the parallel geometry of the lattice.

static void createNeighborLists()
Creates the Cartesian coordinates and the neighbor lists for every processor in every direction.

static void assignNeighbor(int direction)
Uses MPI utilities to find the neighbors in one direction.

static void assignSecondNeighbor(int dir1, int dir2)
Uses MPI utilities to find the neighbors in two directions.

static void MyMPI_Cart_shift2(MPI_Comm comm, int dir1, int shift1, int dir2, int shift2, int &source, int &dest)
Uses MPI utilities to find the neighbors in two directions.

static int getNeighbor(int direction, int sign)
Returns the rank of the neighbor along the given direction and sign.

static int getSecondNeighbor(int direction1, int sign1, int direction2, int sign2)
Returns the rank of the neighbor along the two given directions and signs.

static void openFile(MPI_File &file, const char *fileName)
Opens a file with MPI.

static void closeFile(MPI_File &file)
Closes a file with MPI.

static void finalize()
Finalizes MPI.
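
Below is a minimal usage sketch based on the member signatures above. The header path, the lattice and sublattice sizes, the choice of direction index 3 as the time direction, and the explicit call to createNeighborLists() are assumptions made for illustration; they are not taken from this listing.

#include "Parallelization/parallel.h"   // assumed header path for the Parallel class
#include <cstdio>

int main()
{
  // Start MPI and query the rank and the number of processors.
  Parallel::initialize();

  // Decompose a (hypothetical) 16^3 x 32 lattice into 8^3 x 16 sublattices,
  // which requires 2 x 2 x 2 x 2 = 16 active ranks.
  Parallel::createGeometry({16, 16, 16, 32}, {8, 8, 8, 16});
  Parallel::createNeighborLists();

  // With the MPI_Cart_shift convention used above, sign 0 is the backward
  // neighbor and sign 1 the forward neighbor along a direction.
  int backward = Parallel::getNeighbor(3, 0);
  int forward  = Parallel::getNeighbor(3, 1);
  std::printf("direction-3 neighbors: %d (backward), %d (forward)\n",
              backward, forward);

  Parallel::finalize();
  return 0;
}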