There are several things worth fixing here, both on the OpenMP side and on the MPI side. Let me go through your code piece by piece and then show a corrected version at the end.
double t1 = MPI_Wtime();
Problem: calling MPI_Wtime() before MPI_Init() is undefined. Also, when you time an MPI program it is good practice to call MPI_Barrier() right before taking the Wtime, so that all ranks start the measurement from the same point.
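A minimal sketch of that timing pattern (the work in the middle is just a placeholder):

#include <mpi.h>
#include <cstdio>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);           // MPI_Wtime() only between Init and Finalize

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Barrier(MPI_COMM_WORLD);      // line all ranks up before starting the clock
    double t1 = MPI_Wtime();

    // ... the work you want to time ...

    MPI_Barrier(MPI_COMM_WORLD);      // wait for the slowest rank before stopping the clock
    double t2 = MPI_Wtime();

    if (rank == 0)
        printf("elapsed: %f s\n", t2 - t1);

    MPI_Finalize();                   // no MPI_Wtime() after this point
    return 0;
}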
Also note that omp_get_num_threads() returns 1 when it is called outside a parallel region, so the value you print here does not tell you how many threads the region will actually get.
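A small standalone example of the difference (not your code):

#include <cstdio>
#include <omp.h>

int main()
{
    // Outside a parallel region the current "team" is just the initial thread.
    printf("outside: omp_get_num_threads() = %d\n", omp_get_num_threads());   // always 1
    printf("outside: omp_get_max_threads() = %d\n", omp_get_max_threads());   // what a region would get

    #pragma omp parallel
    {
        #pragma omp master
        printf("inside:  omp_get_num_threads() = %d\n", omp_get_num_threads());
    }
    return 0;
}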
#pragma omp parallel num_threads(nThread)
Hard-coding the team size with num_threads(nThread) works, but it is usually more convenient to drop the clause and control the number of threads through the OMP_NUM_THREADS environment variable; keep in mind that a num_threads clause overrides the environment setting.
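A sketch of what that precedence looks like in practice; the first team size printed depends on how you set OMP_NUM_THREADS:

#include <cstdio>
#include <omp.h>

int main()
{
    // No num_threads clause: the team size comes from OMP_NUM_THREADS
    // (or from a prior omp_set_num_threads() call).
    #pragma omp parallel
    {
        #pragma omp master
        printf("team size from the environment: %d\n", omp_get_num_threads());
    }

    // A num_threads clause overrides the environment for this one region.
    #pragma omp parallel num_threads(2)
    {
        #pragma omp master
        printf("team size forced by the clause: %d\n", omp_get_num_threads());
    }
    return 0;
}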
default(shared)
Variables are shared by default in a parallel region, so default(shared) changes nothing here and can simply be dropped.
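For illustration (a toy example, not your code), these two spellings behave identically:

#include <cstdio>
#include <omp.h>

int main()
{
    int x = 42;              // declared before the region, therefore shared by default

    #pragma omp parallel     // exactly the same as: #pragma omp parallel default(shared)
    {
        #pragma omp master
        printf("all threads see the same x = %d\n", x);
    }
    return 0;
}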
private(iam, i)
Rather than listing iam and i in a private clause, declare them where they are used: iam inside the parallel region and i in the for statement itself. Variables declared inside the region are private automatically, which keeps the clauses short and avoids mistakes (a variable you forget to list stays shared).
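Roughly like this (a sketch reusing your names iam and i):

#include <cstdio>
#include <omp.h>

int main()
{
    #pragma omp parallel                    // no private(iam, i) clause needed
    {
        int iam = omp_get_thread_num();     // declared inside the region, so private by construction

        #pragma omp for
        for (int i = 0; i < 8; i++)         // loop variable declared in the for statement, also private
            printf("thread %d handles i = %d\n", iam, i);
    }
    return 0;
}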
#pragma omp for schedule(dynamic, 1)
Every iteration of this loop does the same amount of work, so schedule(dynamic, 1) buys you nothing and only adds scheduling overhead; a static schedule spreads the iterations just as evenly without any run-time bookkeeping.
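For example, a sketch with a uniform loop (the array and its contents are made up):

#include <cstdio>
#include <omp.h>

int main()
{
    const int n = 16;
    int out[n];

    // Every iteration costs the same, so schedule(static), which hands each thread
    // one equal contiguous chunk, already balances the load with no run-time
    // scheduling (most implementations use it as the default anyway).
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < n; i++)
        out[i] = i * i;

    // schedule(dynamic, 1) would send each thread back to the scheduler after
    // every single iteration: useful when iteration costs vary a lot, pure overhead here.
    for (int i = 0; i < n; i++)
        printf("out[%d] = %d\n", i, out[i]);
    return 0;
}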
int grandTotal=0;
for (int j=0;j<nThread;j++) {
printf("Total=%d\n",total[j]);
grandTotal += total[j];
}
Collecting the per-thread partial sums in the total array and adding them up afterwards is exactly the job of an OpenMP reduction; reduction(+:...) does it for you without the extra array and the serial loop.
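A sketch of the same summation with a reduction instead of the total array (the sizes and data are made up):

#include <cstdio>
#include <omp.h>

int main()
{
    const int n = 1000000;
    int data[16];
    for (int i = 0; i < 16; ++i)
        data[i] = i;

    int grandTotal = 0;                    // no per-thread total[] array needed

    // Each thread accumulates into its own private copy; the copies are
    // added into grandTotal automatically when the loop ends.
    #pragma omp parallel for reduction(+:grandTotal)
    for (int i = 0; i < n; i++)
        grandTotal += data[i % 16];

    printf("Grand total = %d\n", grandTotal);
    return 0;
}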
double t2 = MPI_Wtime();
Same issue as with t1: MPI_Wtime() is only well defined between MPI_Init() and MPI_Finalize(), so calling it outside that window is undefined, and here too a barrier right before the call makes the measurement meaningful across ranks.
That covers the OpenMP part of this code. As for MPI: apart from the timing calls you only query the communicator comm; the processes never exchange any data and the work is never divided between them, so every rank simply repeats the same computation. Which raises the question: why use MPI here at all? (Answer: OpenMP only works within a single shared-memory node, so as soon as you want to use more than one node you need MPI, and then it makes sense to split the work across the ranks and let OpenMP handle the threads within each rank.)
Putting all of that together, here is how I would write it:
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    int world_size, world_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    int name_len;
    char proc_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Get_processor_name(proc_name, &name_len);

    // Line the ranks up before starting the clock.
    MPI_Barrier(MPI_COMM_WORLD);
    double t_start = MPI_Wtime();

    // Each rank works on its share of the iterations.
    const int n_iterations = 1e7 / world_size;
    int data[16];
    for (int i = 0; i < 16; ++i)
        data[i] = rand() % 16;

    unsigned int total = 0;
    #pragma omp parallel reduction(+:total)
    {
        int n_threads = omp_get_num_threads(),
            thread_id = omp_get_thread_num();

        #pragma omp master
        {
            printf("nThread = %d\n", n_threads);
        }

        // The threads of this rank share the loop; the reduction combines
        // their partial sums when the region ends.
        #pragma omp for
        for (int i = 0; i < n_iterations; i++)
            total += data[i % 16];

        printf("Hello from thread %d out of %d from process %d out of %d on %s\n",
               thread_id, n_threads, world_rank, world_size, proc_name);
    }

    // Combine the per-rank sums across all processes.
    unsigned int grand_total;
    MPI_Allreduce(&total, &grand_total, 1, MPI_UNSIGNED, MPI_SUM, MPI_COMM_WORLD);

    MPI_Barrier(MPI_COMM_WORLD);
    double t_end = MPI_Wtime();

    printf("Thread total = %u\n", total);
    if (world_rank == 0)
    {
        printf("Grand Total = %u\n", grand_total);
        printf("Time elapsed with MPI clock = %f\n", t_end - t_start);
    }

    MPI_Finalize();
    return 0;
}
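A side note on building it: use your MPI compiler wrapper together with the compiler's OpenMP flag, e.g. something like mpicxx -fopenmp main.cpp on a GCC-based toolchain; the exact wrapper name and flag depend on your MPI installation and compiler.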
Note: unlike line 22 of your code, I did not use schedule(dynamic, 1); with uniform iterations the default schedule is already the right choice.
I have not used PBS, but if your cluster runs SLURM, an sbatch file for a 6-core node could look like this:
#!/bin/bash
export OMP_NUM_THREADS=6
srun ./a.out
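With SLURM the number of MPI ranks that srun starts comes from the resources the job requests (for example via sbatch options such as --ntasks and --cpus-per-task), while OMP_NUM_THREADS sets the threads per rank; the script above is only the minimal skeleton, so adjust both to your cluster.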
I timed the program with different combinations of MPI processes and OMP threads. To summarize the results: on 1-16 cores the best choice was 1 MPI process with 1-16 OMP threads, and on 16-256 cores it was 1-16 MPI processes with 16 threads each. In other words, combine MPI and OMP: let OpenMP fill a node and MPI span the nodes.