Send dynamic array with dynamic size using MPI_Bcast

OpenMPI: I want to read the file in the root of the node and send the contents of this file to all the other nodes. I found that MPI_Bcast does this:

int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm) 

All the examples I found have the count value already known, but in my case, the count value is only known on the root. Other examples say that the same MPI_Bcast call receives the data on all the other nodes.

I added the following:

 // Question code (annotated).
 typedef short Descriptor[128];  // one descriptor = 128 shorts = 256 bytes
 MPI_Datatype descriptorType;
 // BUG: MPI_Type_contiguous takes an ELEMENT COUNT, not a byte size.
 // sizeof(Descriptor) is 256 (bytes), so combined with MPI_SHORT this
 // builds a type of 256 shorts = 512 bytes -- twice the size of a real
 // Descriptor.  The MPI_Bcast of querySize such elements below therefore
 // writes past the end of the receive buffer on non-root ranks, which is
 // what triggers the malloc assertion.  Either pass 128 with MPI_SHORT,
 // or keep sizeof(Descriptor) and use MPI_CHAR.
 MPI_Type_contiguous(sizeof(Descriptor), MPI_SHORT, &descriptorType);
 MPI_Type_commit(&descriptorType);
 if(world_rank == 0) {
     struct stat finfo;
     if(stat(argv[1], &finfo) == 0) {
         querySize = finfo.st_size/sizeof(Descriptor);  // descriptors in file
     }
     { //read binary query
         queryDescriptors = new Descriptor[querySize];
         fstream qFile(argv[1], ios::in | ios::binary);
         qFile.read((char*)queryDescriptors, querySize*sizeof(Descriptor));
         qFile.close();
     }
 }
 // Count is broadcast first so the other ranks can size their buffers.
 MPI_Bcast((void*)&querySize, 1, MPI_INT, 0, MPI_COMM_WORLD);
 if (world_rank != 0) {
     queryDescriptors = new Descriptor[querySize];
 }
 // Payload broadcast; all ranks pass the same count and datatype.
 MPI_Bcast((void*)queryDescriptors, querySize, descriptorType, 0, MPI_COMM_WORLD);

When I run it this way: mpirun -np 2 ./mpi_hello_world it works fine, but when I run it with more than 2 processes, I get the following:

 mpi_hello_world: malloc.c:3096: sYSMALLOc: Assertion `(old_top == (((mbinptr) (((char *) &((av)->bins[((1) - 1) * 2])) - __builtin_offsetof (struct malloc_chunk, fd)))) && old_size == 0) || ((unsigned long) (old_size) >= (unsigned long)((((__builtin_offsetof (struct malloc_chunk, fd_nextsize))+((2 * (sizeof(size_t))) - 1)) & ~((2 * (sizeof(size_t))) - 1))) && ((old_top)->size & 0x1) && ((unsigned long)old_end & pagemask) == 0)' failed. mpi_hello_world: malloc.c:3096: sYSMALLOc: Assertion `(old_top == (((mbinptr) (((char *) &((av)->bins[((1) - 1) * 2])) - __builtin_offsetof (struct malloc_chunk, fd)))) && old_size == 0) || ((unsigned long) (old_size) >= (unsigned long)((((__builtin_offsetof (struct malloc_chunk, fd_nextsize))+((2 * (sizeof(size_t))) - 1)) & ~((2 * (sizeof(size_t))) - 1))) && ((old_top)->size & 0x1) && ((unsigned long)old_end & pagemask) == 0)' failed. 
+5
source share
1 answer

If qFile.read(...) is not enclosed in the if(rank==0){...} block, all processes will read the file. And queryDescriptors = new Descriptor[querySize]; should be called after the first MPI_Bcast() on all processes except 0: before that call, querySize is not meaningful on those processes.

Process 0 should:

  • read the number of items
  • allocate memory
  • read the array
  • broadcast the number of items
  • broadcast the array

Other processes should:

  • receive the number of items
  • allocate memory
  • receive the array

Here is an example of how to read a float array and use dynamic allocation:

 #include <stdio.h> #include <iostream> #include <fstream> #include <mpi.h> using namespace std; int main (int argc, char *argv[]) { int rank; int size; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if(rank == 0) { //creating the file ofstream myfile; myfile.open ("example.txt", ios::out |ios::binary); int nbitem=42; myfile.write((char*)&nbitem,sizeof(int)); float a=0; for(int i=0;i<nbitem;i++){ myfile.write((char*)&a,sizeof(float)); a+=2; } myfile.close(); } //now reading the file int nbitemread=0; float* buffer; if(rank==0){ ifstream file ("example.txt", ios::in |ios::binary); file.read ((char*)&nbitemread, sizeof(int)); buffer=new float[nbitemread]; file.read ((char*)buffer,nbitemread* sizeof(float)); file.close(); //communication MPI_Bcast(&nbitemread, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(buffer, nbitemread, MPI_FLOAT, 0, MPI_COMM_WORLD); }else{ MPI_Bcast(&nbitemread, 1, MPI_INT, 0, MPI_COMM_WORLD); //nbitemread is meaningfull now buffer=new float[nbitemread]; MPI_Bcast(buffer, nbitemread, MPI_FLOAT, 0, MPI_COMM_WORLD); } //printing... cout<<"on rank "<<rank<<" rode "<<buffer[nbitemread/2]<<" on position "<<nbitemread/2<<endl; delete[] buffer; MPI_Finalize(); return 0; } 

Compile it with mpiCC main.cpp -o main and run mpirun -np 2 main

Another problem in your code is MPI_Type_contiguous(sizeof(Descriptor), MPI_SHORT, &descriptorType); . MPI_Type_contiguous expects an element count, but sizeof(Descriptor) is a size in bytes (256, not 128 elements), so the matching element type is MPI_CHAR: MPI_Type_contiguous(sizeof(Descriptor), MPI_CHAR, &descriptorType); (alternatively, keep MPI_SHORT and pass the element count 128). Here is a piece of code based on yours that should do the trick:

 #include <stdio.h> #include <iostream> #include <fstream> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <mpi.h> using namespace std; int main (int argc, char *argv[]) { int world_rank; int size; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); int querySize; typedef short Descriptor[128]; MPI_Datatype descriptorType; MPI_Type_contiguous(sizeof(Descriptor), MPI_CHAR, &descriptorType); MPI_Type_commit(&descriptorType); Descriptor* queryDescriptors; if(world_rank == 0) { struct stat finfo; if(stat(argv[1], &finfo) == 0) { cout<<"st_size "<<finfo.st_size<<" descriptor "<<sizeof(Descriptor)<< endl; querySize = finfo.st_size/sizeof(Descriptor); cout<<"querySize "<<querySize<<endl; }else{ cout<<"stat error"<<endl; } { //read binary query queryDescriptors = new Descriptor[querySize]; fstream qFile(argv[1], ios::in | ios::binary); qFile.read((char*)queryDescriptors, querySize*sizeof(Descriptor)); qFile.close(); } } MPI_Bcast((void*)&querySize, 1, MPI_INT, 0, MPI_COMM_WORLD); if (world_rank != 0) { queryDescriptors = new Descriptor[querySize]; } MPI_Bcast((void*)queryDescriptors, querySize, descriptorType, 0, MPI_COMM_WORLD); cout<<"on rank "<<world_rank<<" rode "<<queryDescriptors[querySize/2][12]<<" on position "<<querySize/2<<endl; delete[] queryDescriptors; MPI_Finalize(); return 0; } 
+2
source

Source: https://habr.com/ru/post/1211011/


All Articles