LatticeYangMills
parallel.cpp
/******************************************************************************
 *
 * MIT License
 *
 * Copyright (c) 2018 Giovanni Pederiva
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 ******************************************************************************/

/*!
 * \file parallel.cpp
 * \brief Utilities for parallelization.
 */
#include "Utils/clusterspecifier.h"
#ifndef LACONIA
#include <mpi/mpi.h>
#else
#include <mpi.h>
#endif
#include "ParallelTools/parallel.h"
#include <array>

// Initialize static variables
int Parallel::m_rank;
int Parallel::m_numProcs;
int Parallel::m_activeProcs;
bool Parallel::m_isActive;
MPI_Comm Parallel::m_cartCoords;
MPI_Comm Parallel::m_cartCoordComm;
std::array<int, 4> Parallel::m_subBlocks;
std::array<int, 4> Parallel::m_rankCoord;
std::array<int, 4> Parallel::m_latticeSubSize;
std::array<int, 4> Parallel::m_latticeFullSize;
std::array<int, 4> Parallel::m_parity;
std::array<std::array<int, 2>, 4> Parallel::m_neighbor;
std::array<std::array<std::array<std::array<int, 2>, 4>, 2>, 4> Parallel::m_secondNeighbor;
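// Neighbor table conventions (filled in assignNeighbor/assignSecondNeighbor below):
//   m_neighbor[dir][0]                 rank one step in the negative direction along dir
//   m_neighbor[dir][1]                 rank one step in the positive direction along dir
//   m_secondNeighbor[d1][s1][d2][s2]   rank shifted diagonally along d1 and d2,
//                                      with sign index 0 = negative step, 1 = positive step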

/*!
 * \brief Initializes MPI and gets the rank number and the total number of processors.
 */
void Parallel::initialize(){
    // Initialize MPI (no command line arguments are forwarded)
    MPI_Init(nullptr, nullptr);

    // Get rank and number of processors
    MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &m_numProcs);
}

/*!
 * \brief Creates the parallel geometry of the lattice.
 */
void Parallel::createGeometry(std::array<int, 4> latticeSize,
                              std::array<int, 4> subLatticeSize){

    // Fix sizes from input
    m_latticeSubSize = subLatticeSize;
    m_latticeFullSize = latticeSize;

    // Create cartesian coordinates and neighbor lists
    createNeighborLists();
}

/*!
 * \brief Finalizes MPI.
 */
void Parallel::finalize(){
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();
}

/*!
 * \brief Creates the cartesian coordinates and the neighbor lists for every
 *        processor in every direction.
 */
void Parallel::createNeighborLists(){

    // Count the number of subblocks per dimension
    int subBlocksProd = 1;
    for(int i = 0; i < 4; i++){
        m_subBlocks[i] = m_latticeFullSize[i] / m_latticeSubSize[i];
        subBlocksProd *= m_subBlocks[i];
    }

    // Check if there are enough processors to fill the lattice
    if(subBlocksProd > m_numProcs){
        if(m_rank == 0)
            printf("ERROR: Too few processors given for specified lattice and sublattice sizes\n");
        MPI_Abort(MPI_COMM_WORLD, 0);
    } else {
        m_activeProcs = subBlocksProd;
    }

    // MPI_Cart required variables
    std::array<int, 4> period = {1,1,1,1}; // for periodic boundaries
    int reorder = 0;                       // no new rank numbering

    // Create coordinates map
    MPI_Cart_create(MPI_COMM_WORLD, 4, m_subBlocks.data(), period.data(), reorder, &m_cartCoords);

    // Create neighbor lists for the active processors
    if(m_rank + 1 <= m_activeProcs){
        m_isActive = true;
        MPI_Cart_coords(m_cartCoords, m_rank, 4, m_rankCoord.data());
        // Assign neighbors along all directions
        for(int i = 0; i < 4; i++)
            assignNeighbor(i);

        for(int i = 0; i < 4; i++)
            for(int j = 0; j < 4; j++)
                assignSecondNeighbor(i, j);
    } else {
        // Set extra processors to inactive
        m_isActive = false;
    }

    // Create intracommunicator that separates active from inactive processors
    int active = m_isActive ? 1 : 0;
    MPI_Comm_split(MPI_COMM_WORLD, active, 1, &m_cartCoordComm);
}

/*!
 * \brief Opens a file with MPI.
 */
void Parallel::openFile(MPI_File &file, const char* fileName){
    MPI_File_open(m_cartCoordComm, fileName, MPI_MODE_CREATE|MPI_MODE_RDWR,
                  MPI_INFO_NULL, &file);
}

/*!
 * \brief Closes a file with MPI.
 */
void Parallel::closeFile(MPI_File &file){
    MPI_File_close(&file);
}

/*!
 * \brief Uses MPI utilities to find the neighbors in one direction.
 */
void Parallel::assignNeighbor(int direction){
    MPI_Cart_shift(m_cartCoords, direction, 1,
                   &m_neighbor[direction][0],  // recv rank (negative direction)
                   &m_neighbor[direction][1]); // send rank (positive direction)
}

/*!
 * \brief Uses MPI utilities to find the neighbors in two directions.
 */
void Parallel::assignSecondNeighbor(int dir1, int dir2){
    if(dir1 != dir2){
        MyMPI_Cart_shift2(m_cartCoords, dir1, 1, dir2, 1,
                          m_secondNeighbor[dir1][0][dir2][0],  // recv rank
                          m_secondNeighbor[dir1][1][dir2][1]); // send rank
        MyMPI_Cart_shift2(m_cartCoords, dir1, -1, dir2, 1,
                          m_secondNeighbor[dir1][1][dir2][0],  // recv rank
                          m_secondNeighbor[dir1][0][dir2][1]); // send rank
    }
}

/*!
 * \brief Uses MPI utilities to find the ranks shifted along two directions at once.
 */
void Parallel::MyMPI_Cart_shift2(MPI_Comm comm, int dir1, int shift1,
                                 int dir2, int shift2,
                                 int& source, int& dest){
    std::array<int, 4> coords;
    // Destination: shift the local coordinates forward along both directions
    coords = m_rankCoord;
    coords[dir1] += shift1;
    coords[dir2] += shift2;
    MPI_Cart_rank(comm, coords.data(), &dest);
    // Source: shift the local coordinates backward along both directions
    coords = m_rankCoord;
    coords[dir1] -= shift1;
    coords[dir2] -= shift2;
    MPI_Cart_rank(comm, coords.data(), &source);
}
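
// Illustrative example (hypothetical 2x4x4x4 process grid): a rank with cartesian
// coordinates (1,2,0,3) calling MyMPI_Cart_shift2(comm, 0, +1, 1, +1, src, dst)
// gets dst = rank at (0,3,0,3) (the out-of-range coordinate 2 wraps to 0 because
// the grid was created with periodic boundaries) and src = rank at (0,1,0,3).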

/*!
 * \brief Returns the rank of the neighbor along the given direction and sign.
 */
int Parallel::getNeighbor(int direction, int sign){
    return m_neighbor[direction][sign];
}

/*!
 * \brief Returns the rank of the neighbor along the two given directions and signs.
 */
int Parallel::getSecondNeighbor(int direction1, int sign1,
                                int direction2, int sign2){
    return m_secondNeighbor[direction1][sign1][direction2][sign2];
}
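
For orientation, a minimal usage sketch of the static interface defined above; the driver code and the chosen lattice sizes below are assumptions for illustration, not part of parallel.cpp.

// Minimal usage sketch (assumed driver code, not part of this file)
#include "ParallelTools/parallel.h"
#include <array>

int main(){
    Parallel::initialize();                      // MPI_Init, rank and size

    std::array<int, 4> fullSize = {8, 8, 8, 8};  // hypothetical full lattice size
    std::array<int, 4> subSize  = {4, 8, 8, 8};  // hypothetical sublattice per rank (needs 2 ranks)
    Parallel::createGeometry(fullSize, subSize); // cartesian grid and neighbor lists

    // ... lattice work using Parallel::getNeighbor(dir, sign) etc. ...

    Parallel::finalize();                        // MPI_Barrier and MPI_Finalize
    return 0;
}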