int MPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)

- *buf: on the root process, a pointer to the address containing the data elements to be broadcast; on all other processes, a pointer to the address where the broadcast data will be stored.
- count: how many data elements will be broadcast.
- datatype: MPI_BYTE, MPI_PACKED, MPI_CHAR, MPI_SHORT, MPI_INT, MPI_LONG, MPI_FLOAT, MPI_DOUBLE, MPI_LONG_DOUBLE, MPI_UNSIGNED_CHAR, and other user-defined types.
- root: rank of the process from which the original data will be broadcast. (Note: unlike point-to-point calls such as MPI_Send/MPI_Recv, MPI_Bcast takes no tag argument.)
- comm: communicator (typically MPI_COMM_WORLD).

Inside intro-mpi, create a file named bcast.c with the following contents.

bcast.c:
1
2
mpicc -o bcast bcast.c
mpirun --host compute01:2,compute02:2 -np 4 ./bcast
int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)

- *sendbuf: pointer to the address containing the array of data elements to be scattered (significant only at the root).
- sendcount: how many data elements to be sent to each process of the communicator.
- *recvbuf: pointer to the address on each process of the communicator where the scattered portion will be written.
- recvcount: how many data elements to be received by each process of the communicator.
- root: rank of the process from which the original data will be scattered.
- comm: communicator (typically MPI_COMM_WORLD).

Inside intro-mpi, create a file named scatter.c with the following contents.

scatter.c:
1
2
mpicc -o scatter scatter.c
mpirun --host compute01:2,compute02:2 -np 4 ./scatter
int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)

- *sendbuf: pointer to the address on each process of the communicator containing the array of data elements to be gathered.
- sendcount: how many data elements from each process of the communicator to be sent back to the root process.
- *recvbuf: pointer to the address on the root process where all gathered data will be written (significant only at the root).
- recvcount: how many data elements to be received from each process of the communicator.
- root: rank of the process where the data will be gathered.
- comm: communicator (typically MPI_COMM_WORLD).

Inside intro-mpi, create a file named gather.c with the following contents.

gather.c:
1
2
mpicc -o gather gather.c
mpirun --host compute01:2,compute02:2 -np 4 ./gather
int MPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)

- *sendbuf: pointer to the address on each process of the communicator containing the array of data elements to be reduced.
- *recvbuf: pointer to the address on the root process where the final reduced data will be written.
- count: how many data elements to be received from each process of the communicator. If count > 1, the operation is performed element-wise.
- op: may be MPI_MIN, MPI_MAX, MPI_SUM, MPI_PROD (twelve predefined operations in total). The programmer may add user-defined operations, which must be commutative and associative.
- root: rank of the process where the final reduced result will be stored.
- comm: communicator (typically MPI_COMM_WORLD).

Inside intro-mpi, create a file named reduce.c with the following contents.

reduce.c:
1
2
mpicc -o reduce reduce.c
mpirun --host compute01:2,compute02:2 -np 4 ./reduce