1. Virtual Topology
Overview

- For neighbors that do not exist, MPI_PROC_NULL (-2) is returned

Usage
- Create an MPI communicator with MPI_Cart_create()
- A new communicator, distinct from MPI_COMM_WORLD, is created

2. Creating a Cartesian Topology
MPI_Dims_create()
int MPI_Dims_create( int nnodes, int ndims, int dims[] )
- Given the number of processes nnodes and the number of dimensions ndims, returns in dims[] how many processes to place along each dimension
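Nonzero entries of dims[] are treated as fixed constraints, and only the zero entries are filled in. The sketch below is not part of the original example; it assumes the job is launched with a process count divisible by 2 and fixes the second dimension to 2:

#include <stdio.h>
#include <mpi.h>

/* Minimal sketch: fix dims[1] and let MPI_Dims_create() choose dims[0]
   (e.g. 12 processes -> 6 x 2). Assumes nprocs is divisible by 2. */
int main(void)
{
    int myrank, nprocs, dims[2];
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    dims[0] = 0;   /* 0  : let MPI choose this dimension  */
    dims[1] = 2;   /* > 0: keep this dimension fixed at 2 */
    MPI_Dims_create(nprocs, 2, dims);
    if (myrank == 0) printf("nprocs=%d -> dims = %d x %d\n", nprocs, dims[0], dims[1]);
    MPI_Finalize();
    return 0;
}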
1. MPI_Cart_create
MPI_Cart_create()
int MPI_Cart_create( MPI_Comm comm_old, int ndims, const int dims[],
const int periods[], int reorder, MPI_Comm *comm_cart )
Arguments
| argument | description |
|---|---|
| comm_old | existing communicator (e.g. MPI_COMM_WORLD) |
| ndims | number of dimensions of the Cartesian grid |
| dims[] | number of processes in each dimension |
| periods[] | whether each dimension is periodic |
| reorder | false (typical): each process keeps the same rank as in the old group / true: ranks may be reassigned |
Example: C code
#include <stdio.h>
#include <mpi.h>

#define NDIM (2)

int main(void)
{
    int myrank, nprocs, dims[NDIM] = {0};
    int reorder = 0, periods[NDIM] = {0, 0};
    MPI_Comm comm_cart;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* let MPI_Dims_create() choose a balanced NDIM-dimensional factorization of nprocs */
    MPI_Dims_create(nprocs, NDIM, dims);
    if (myrank == 0) printf("DIMs = %d, %d\n", dims[0], dims[1]);

    /* build a new Cartesian communicator on top of MPI_COMM_WORLD */
    MPI_Cart_create(MPI_COMM_WORLD, NDIM, dims, periods, reorder, &comm_cart);

    MPI_Finalize();
    return 0;
}
Example: Result

2. Mapping Functions
MPI_Cart_rank()
int MPI_Cart_rank( MPI_Comm comm, const int coords[], int *rank )
- Converts Cartesian coordinates coords[] into the rank of the corresponding process in comm
MPI_Cart_coords()
int MPI_Cart_coords( MPI_Comm comm, int rank, int maxdims, int coords[] )
- Converts a rank in comm into its Cartesian coordinates, returned in coords[]
Example: C code

#include <stdio.h>
#include <mpi.h>

int main(void)
{
    MPI_Comm oldcomm, newcomm;
    int ndims = 2, dimsize[2];
    int periods[2], reorder;
    int myrank, nprocs, i, j, rank;
    int coords[2] = {0};

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    oldcomm = MPI_COMM_WORLD;
    dimsize[0] = 3, dimsize[1] = 2;     /* 3 x 2 grid: run with 6 processes */
    periods[0] = 1, periods[1] = 0;     /* periodic in dimension 0 only     */
    reorder = 0;
    MPI_Cart_create(oldcomm, ndims, dimsize, periods, reorder, &newcomm);

    /* coordinates -> rank */
    if (myrank == 0) {
        for (i = 0; i < dimsize[0]; i++) {
            for (j = 0; j < dimsize[1]; j++) {
                coords[0] = i, coords[1] = j;
                MPI_Cart_rank(newcomm, coords, &rank);
                printf("coords=%d, %d, rank=%d\n", coords[0], coords[1], rank);
            }
        }
    }

    /* rank -> coordinates */
    if (myrank == 0) {
        for (rank = 0; rank < nprocs; rank++) {
            MPI_Cart_coords(newcomm, rank, ndims, coords);
            printf("rank=%d, coord=%d, %d\n", rank, coords[0], coords[1]);
        }
    }

    MPI_Finalize();
    return 0;
}
Example: Result

3. MPI_Cart_shift
MPI_Cart_shift()

int MPI_Cart_shift( MPI_Comm comm, int direction, int disp, int* rank_source, int* rank_dest )
- For a shift of disp steps along dimension direction, returns the ranks of the source and destination neighbors; neighbors that do not exist (non-periodic boundary) are returned as MPI_PROC_NULL
Example: C code

#include <stdio.h>
#include <mpi.h>

int main(void)
{
    MPI_Comm oldcomm, newcomm;
    int ndims = 2, dimsize[2];
    int periods[2], reorder;
    int myrank, nprocs;
    int direction, disp, src, dest;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    oldcomm = MPI_COMM_WORLD;
    dimsize[0] = 3, dimsize[1] = 2;     /* 3 x 2 grid: run with 6 processes */
    periods[0] = 1, periods[1] = 0;     /* periodic in dimension 0 only     */
    reorder = 0;
    MPI_Cart_create(oldcomm, ndims, dimsize, periods, reorder, &newcomm);

    /* shift by 1 along dimension 1 (non-periodic): edge processes get MPI_PROC_NULL */
    direction = 1; disp = 1;
    MPI_Cart_shift(newcomm, direction, disp, &src, &dest);
    printf("rank: %d, source=%d, destination=%d\n", myrank, src, dest);

    MPI_Finalize();
    return 0;
}
Example: Result

1. Neighborhood Collective Communication
Overview
- Neighbors that do not exist are defined as MPI_PROC_NULL (-2)

2. MPI_Neighbor_allgather
MPI_Neighbor_allgather()
int MPI_Neighbor_allgather( const void* sendbuf, int sendcount, MPI_Datatype sendtype,
void* recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm )
- The arguments are the same as MPI_Allgather(); data is gathered only from the neighbors in the topology
Example: C code

#include <stdio.h>
#include <mpi.h>

int main(void)
{
    int myrank, procs;
    MPI_Comm comm_cart;
    int ndim = 2, dims[2];
    int recvbuf[4] = {-10, -10, -10, -10};   /* entries stay -10 for missing neighbors */
    int periods[2] = {0, 0}, reorder = 0;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &procs);

    dims[0] = 4, dims[1] = 4;                /* 4 x 4 grid: run with 16 processes */
    MPI_Cart_create(MPI_COMM_WORLD, ndim, dims, periods, reorder, &comm_cart);

    /* gather one integer (the rank) from each of the four neighbors */
    MPI_Neighbor_allgather(&myrank, 1, MPI_INT, recvbuf, 1, MPI_INT, comm_cart);
    printf("myrank : %d, recvbuf: %d, %d, %d, %d\n",
           myrank, recvbuf[0], recvbuf[1], recvbuf[2], recvbuf[3]);

    MPI_Finalize();
    return 0;
}
Example: Result


- The neighbors appear in recvbuf in order: negative then positive neighbor along dimension 0, then along dimension 1
- For neighbors that do not exist (MPI_PROC_NULL), nothing is received, so the initial value of recvbuf, -10, is printed

3. MPI_Neighbor_alltoallv
MPI_Neighbor_alltoallv()
int MPI_Neighbor_alltoallv( const void *sendbuf, const int sendcounts[],
const int sdispls[], MPI_Datatype sendtype, void *recvbuf, const int recvcounts[],
const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm)
- All arguments are the same as MPI_Alltoallv()

Usage

- Communication with every neighbor, which would otherwise require a separate send/receive pair per neighbor, can be handled in a single MPI_Neighbor_alltoallv() call, as in the sketch below
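As a rough sketch (not from the original notes), the call below mirrors the MPI_Neighbor_allgather() example on a 2-D Cartesian communicator: each process exchanges one integer with each of its 2*ndim neighbors, passing the per-neighbor counts and displacements explicitly; entries of recvbuf corresponding to MPI_PROC_NULL neighbors keep their initial value of -1.

#include <stdio.h>
#include <mpi.h>

int main(void)
{
    MPI_Comm comm_cart;
    int ndim = 2, dims[2] = {0, 0}, periods[2] = {0, 0}, reorder = 0;
    int myrank, nprocs, i;
    int sendbuf[4], recvbuf[4];
    int sendcounts[4], recvcounts[4], sdispls[4], rdispls[4];

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* 2-D Cartesian communicator over all processes */
    MPI_Dims_create(nprocs, ndim, dims);
    MPI_Cart_create(MPI_COMM_WORLD, ndim, dims, periods, reorder, &comm_cart);

    /* one integer to/from each of the 2*ndim neighbors
       (order: -dim0, +dim0, -dim1, +dim1) */
    for (i = 0; i < 2 * ndim; i++) {
        sendbuf[i] = myrank;               /* value sent to neighbor i          */
        recvbuf[i] = -1;                   /* stays -1 for MPI_PROC_NULL        */
        sendcounts[i] = recvcounts[i] = 1;
        sdispls[i] = rdispls[i] = i;       /* displacements in units of MPI_INT */
    }

    MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, MPI_INT,
                           recvbuf, recvcounts, rdispls, MPI_INT, comm_cart);

    printf("myrank: %d, recvbuf: %d, %d, %d, %d\n",
           myrank, recvbuf[0], recvbuf[1], recvbuf[2], recvbuf[3]);

    MPI_Finalize();
    return 0;
}

The counts and displacements may differ from neighbor to neighbor, which is the point of the v variant; if every neighbor exchanged the same count, MPI_Neighbor_alltoall() would suffice.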