OpenFPM_pdata  1.1.0
Project that contains the implementation of distributed structures
HDF5_reader_gd.hpp
/*
 * HDF5_reader_gd.hpp
 *
 *  Created on: May 2, 2017
 *      Author: i-bird
 */

#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_GD_HPP_
#define OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_GD_HPP_


#include "Packer_Unpacker/Pack_selector.hpp"
#include "Packer_Unpacker/Packer.hpp"
#include "Packer_Unpacker/Unpacker.hpp"
#include "util/GBoxes.hpp"

template <>
class HDF5_reader<GRID_DIST>
{
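    /*! \brief Read one block of the packed "grid_dist" dataset and unpack it
     *
     * Each block is the contribution written by one process of the previous run;
     * the unpacked grids and their GBoxes are appended to loc_grid_old and
     * gdb_ext_old.
     */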
    template<typename device_grid> void load_block(long int bid,
                                                   hssize_t mpi_size_old,
                                                   int * metadata_out,
                                                   openfpm::vector<size_t> & metadata_accum,
                                                   hid_t plist_id,
                                                   hid_t dataset_2,
                                                   openfpm::vector<device_grid> & loc_grid_old,
                                                   openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old)
    {
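        // Byte range of this block inside the "grid_dist" dataset: offset comes from
        // the accumulated metadata, length from the per-block metadata. A bid of -1
        // (or one beyond the old communicator size) selects an empty range, so the
        // rank still participates in the collective read below.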
        hsize_t offset[1];
        hsize_t block[1];

        if (bid < mpi_size_old && bid != -1)
        {
            offset[0] = metadata_accum.get(bid);
            block[0] = metadata_out[bid];
        }
        else
        {
            offset[0] = 0;
            block[0] = 0;
        }

        hsize_t count[1] = {1};

        //Select file dataspace
        hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);

        H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);

        hsize_t mdim_2[1] = {block[0]};

        //Create data space in memory
        hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);

        // allocate the memory
        HeapMemory pmem;
        //pmem.allocate(req);
        ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
        mem.incRef();

        // Read the dataset.
        H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());

        mem.allocate(pmem.size());

        Unpack_stat ps;

        openfpm::vector<device_grid> loc_grid_old_unp;
        openfpm::vector<GBoxes<device_grid::dims>> gdb_ext_old_unp;

        // Unpack the raw block into temporary vectors of grids and of their boxes
        Unpacker<typename std::remove_reference<decltype(loc_grid_old)>::type,HeapMemory>::unpack(mem,loc_grid_old_unp,ps,1);
        Unpacker<typename std::remove_reference<decltype(gdb_ext_old)>::type,HeapMemory>::unpack(mem,gdb_ext_old_unp,ps,1);

        for (size_t i = 0; i < loc_grid_old_unp.size(); i++)
            loc_grid_old.add(loc_grid_old_unp.get(i));

        for (size_t i = 0; i < gdb_ext_old_unp.size(); i++)
            gdb_ext_old.add(gdb_ext_old_unp.get(i));

        mem.decRef();
        delete &mem;

    }

public:

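    /*! \brief Load the grids of a distributed grid from an HDF5 file
     *
     * The file is opened collectively with MPI-IO; the per-process blocks written
     * by the previous run are redistributed among the current processes and
     * unpacked into loc_grid_old and gdb_ext_old.
     *
     * \param filename file to load
     * \param loc_grid_old vector of the loaded local grids
     * \param gdb_ext_old vector of the loaded domain + ghost boxes
     *
     */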
    template<typename device_grid> inline void load(const std::string & filename,
                                                     openfpm::vector<device_grid> & loc_grid_old,
                                                     openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old)
    {
        Vcluster & v_cl = create_vcluster();

        MPI_Comm comm = v_cl.getMPIComm();
        MPI_Info info = MPI_INFO_NULL;

        int mpi_rank = v_cl.getProcessUnitID();
        //int mpi_size = v_cl.getProcessingUnits();

        // Set up file access property list with parallel I/O access
        hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(plist_id, comm, info);

        //Open a file
        hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
        H5Pclose(plist_id);

        //Open dataset
        hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);

        //Create property list for collective dataset read
        plist_id = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

        //Select file dataspace
        hid_t file_dataspace_id = H5Dget_space(dataset);

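        // The "metadata" dataset holds one entry per writing process, so the number of
        // selected points is the communicator size of the run that saved the file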
        hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);

        //if (mpi_rank == 0)
        //printf ("\nOld MPI size: %llu\n", mpi_size_old);

        //Where to read metadata
        int metadata_out[mpi_size_old];

        for (int i = 0; i < mpi_size_old; i++)
        {
            metadata_out[i] = 0;
        }

        //Size for data space in memory
        hsize_t mdim[1] = {(size_t)mpi_size_old};

        //Create data space in memory
        hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);

/*
        if (mpi_rank == 0)
        {
            hssize_t size;

            size = H5Sget_select_npoints (mem_dataspace_id);
            printf ("\nmemspace_id size: %llu\n", size);
            size = H5Sget_select_npoints (file_dataspace_id);
            printf ("dataspace_id size: %llu\n", size);
        }
*/
        // Read the dataset.
        H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
/*
        if (mpi_rank == 0)
        {
            std::cout << "Metadata_out[]: ";
            for (int i = 0; i < mpi_size_old; i++)
            {
                std::cout << metadata_out[i] << " ";
            }
            std::cout << " " << std::endl;
        }
*/

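        // Prefix sum of the block sizes: metadata_accum.get(i) is the offset (in byte)
        // of the block written by old rank i inside the "grid_dist" dataset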
        openfpm::vector<size_t> metadata_accum;
        metadata_accum.resize(mpi_size_old);

        metadata_accum.get(0) = 0;
        for (int i = 1 ; i < mpi_size_old ; i++)
            metadata_accum.get(i) = metadata_accum.get(i-1) + metadata_out[i-1];

        //Open dataset
        hid_t dataset_2 = H5Dopen (file, "grid_dist", H5P_DEFAULT);

        //Create property list for collective dataset read
        plist_id = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

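        // Distribute the blocks of the old run among the current processes: every rank
        // gets an equal share and the first rest_block ranks take one block more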
        openfpm::vector<size_t> n_block;
        n_block.resize(v_cl.getProcessingUnits());

        for(size_t i = 0 ; i < n_block.size() ; i++)
            n_block.get(i) = mpi_size_old / v_cl.getProcessingUnits();

        size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();

        // std::cout << "MPI size old: " << mpi_size_old << std::endl;
        //std::cout << "MPI size: " << v_cl.getProcessingUnits() << std::endl;

        // std::cout << "Rest block: " << rest_block << std::endl;

        size_t max_block;

        if (rest_block != 0)
            max_block = n_block.get(0) + 1;
        else
            max_block = n_block.get(0);

        //for(size_t i = 0 ; i < n_block.size() ; i++)
        for(size_t i = 0 ; i < rest_block ; i++)
            n_block.get(i) += 1;

        //for(size_t i = 0 ; i < n_block.size() ; i++)
        //std::cout << "n_block.get(i): " << n_block.get(i) << std::endl;

        size_t start_block = 0;
        size_t stop_block = 0;

        if (v_cl.getProcessUnitID() != 0)
        {
            for(size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
                start_block += n_block.get(i);
        }

        stop_block = start_block + n_block.get(v_cl.getProcessUnitID());

//      std::cout << "ID: " << v_cl.getProcessUnitID() << "; Start block: " << start_block << "; " << "Stop block: " << stop_block << std::endl;

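        // Collective reads must be matched across ranks: a rank with no data, or with
        // one block less than max_block, issues an extra load_block call that selects
        // an empty hyperslab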
        if (mpi_rank >= mpi_size_old)
            load_block(start_block,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2,loc_grid_old,gdb_ext_old);
        else
        {
            size_t n_bl = 0;
            size_t lb = start_block;
            for ( ; lb < stop_block ; lb++, n_bl++)
                load_block(lb,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2,loc_grid_old,gdb_ext_old);

            if (n_bl < max_block)
                load_block(-1,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2,loc_grid_old,gdb_ext_old);
        }


        //std::cout << "LOAD: sum: " << sum << std::endl;

        // Close the dataset.
        H5Dclose(dataset);
        H5Dclose(dataset_2);
        // Close the file.
        H5Fclose(file);
        H5Pclose(plist_id);
    }

};


#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_GD_HPP_ */
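
Usage (sketch). This specialization is normally driven from the distributed-grid load path rather than called directly, but the call pattern is roughly the following. It assumes openfpm_init() has already been executed, that the file was written by the matching HDF5_writer<GRID_DIST>, and that device_grid is the same local grid type used by the writer; the restore_grids wrapper and the "HDF5_wr/HDF5_wr.hpp" include path are illustrative assumptions, not part of this header.

// Illustrative sketch only; restore_grids and the include path are assumptions.
#include <string>
#include "HDF5_wr/HDF5_wr.hpp"

template<typename device_grid>
void restore_grids(const std::string & filename,
                   openfpm::vector<device_grid> & loc_grid_old,
                   openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old)
{
    // Collective operation: every process of the current run must call it
    HDF5_reader<GRID_DIST> h5l;

    // Fills loc_grid_old with the unpacked grids and gdb_ext_old with their boxes
    h5l.load<device_grid>(filename, loc_grid_old, gdb_ext_old);
}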