/* Round_pack example: round-robin exchange of non-contiguous data using an
   MPI derived vector datatype (MPI_Type_vector) */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

typedef struct {
  double o;
} msgtype;

MPI_Datatype mpi_type;        /* New MPI data type (declared, not used in this example) */

#define Lun_v 12

typedef struct {
  msgtype v[Lun_v];
} heterogeneous;

MPI_Datatype mpi_ncvect_type; /* New MPI data type */
MPI_Status v_status;

int proc_rank, mpi_procs = 0;
MPI_Comm mpi_world;

int start_mpi(int argc, char **argv)
{
  if ( mpi_procs > 0 ) return(0); /* MPI is already initialized */
  if ( MPI_Init(&argc, &argv) != MPI_SUCCESS ) return(-1);
  mpi_world = MPI_COMM_WORLD;
  if ( MPI_Comm_rank(mpi_world, &proc_rank) != MPI_SUCCESS ) return(-2);
  if ( MPI_Comm_size(mpi_world, &mpi_procs) != MPI_SUCCESS ) return(-3);
  return(0);
}

int build_ncvect_type()
{
  const int nblocks = 4;
  const int clblock = 2;
  const int lblock  = 3;
  MPI_Datatype elldatatype;

  elldatatype = MPI_DOUBLE;

  /* New MPI derived data type: 4 blocks of 2 doubles, with a stride of 3 doubles */
  if ( MPI_Type_vector(nblocks, clblock, lblock, elldatatype,
                       &mpi_ncvect_type) != MPI_SUCCESS ) return(-1);
  /* Before using mpi_ncvect_type it has to be registered in MPI */
  if ( MPI_Type_commit(&mpi_ncvect_type) != MPI_SUCCESS ) return(-2);
  return(0);
}

int release_ncvect_type()
{
  /* Just release the derived data type */
  if ( MPI_Type_free(&mpi_ncvect_type) != MPI_SUCCESS ) return(-2);
  return(0);
}

void initialize_ncvect_data(heterogeneous *given)
{
  int k;

  for ( k = 0; k < Lun_v; k++ )
    given->v[k].o = k * pow(10.0, 3.0);
}

void zero_ncvect_data(heterogeneous *taken)
{
  int k;

  for ( k = 0; k < Lun_v; k++ )
    taken->v[k].o = 0.0;
}

void update_ncvect_data(heterogeneous *given, heterogeneous *taken)
{
  int i;

  for ( i = 0; i < Lun_v; i++ )
    given->v[i].o = taken->v[i].o + 1.0;
}

/* Example program: round robin */
int main(int argc, char **argv)
{
  int i;
  int next_proc, msg_tag = 111, prec_proc;
  heterogeneous given, taken;

  if ( start_mpi(argc, argv) != 0 ) {
    fprintf(stderr, "Error start_mpi\n");
    return(-1);
  }

  /* The derived type must be built and committed before it is used */
  if ( build_ncvect_type() != 0 ) {
    fprintf(stderr, "Error build_ncvect_type\n");
    MPI_Finalize();
    return(-2);
  }

  next_proc = ( proc_rank + 1 ) % mpi_procs;
  prec_proc = ( mpi_procs + proc_rank - 1 ) % mpi_procs;

  initialize_ncvect_data(&given);
  /* Clear the receive buffer: the vector type skips every third element,
     which would otherwise remain uninitialized */
  zero_ncvect_data(&taken);

  if ( proc_rank == 0 ) {
    if ( MPI_Send(&given, 1, mpi_ncvect_type, next_proc, msg_tag,
                  mpi_world) != MPI_SUCCESS ) {
      fprintf(stderr, "Error MPI_Send\n");
    }
  }

  if ( MPI_Recv(&taken, 1, mpi_ncvect_type, prec_proc, msg_tag,
                mpi_world, &v_status) != MPI_SUCCESS ) {
    fprintf(stderr, "Error MPI_Recv\n");
  }

  if ( proc_rank != 0 ) {
    update_ncvect_data(&given, &taken);
    if ( MPI_Send(&given, 1, mpi_ncvect_type, next_proc, msg_tag,
                  mpi_world) != MPI_SUCCESS ) {
      fprintf(stderr, "Error MPI_Send\n");
    }
  }
  else {
    fprintf(stdout, "Sent data =\n");
    for ( i = 0; i < Lun_v; i++ ) {
      fprintf(stdout, "%8.2lf ", given.v[i].o);
      if ( (i % 6) == 5 ) fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n\n");

    fprintf(stdout, "Received data =\n");
    for ( i = 0; i < Lun_v; i++ ) {
      fprintf(stdout, "%8.2lf ", taken.v[i].o);
      if ( (i % 6) == 5 ) fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");
  }

  if ( release_ncvect_type() != 0 ) {
    fprintf(stderr, "Error release_ncvect_type\n");
  }

  return(MPI_Finalize());
}
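
/*
 * Hedged sketch, not part of the round-robin flow above: the file's title
 * mentions MPI_Pack/MPI_Unpack, so the two helpers below illustrate how the
 * same buffer could be exchanged with explicit packing instead of the derived
 * vector type. The names pack_send_ncvect and recv_unpack_ncvect are
 * illustrative additions; they reuse the globals defined above.
 */
int pack_send_ncvect(heterogeneous *given, int dest, int tag)
{
  int packsize = 0, position = 0, k;
  char *buffer;

  /* Ask MPI how many bytes Lun_v doubles need once packed */
  if ( MPI_Pack_size(Lun_v, MPI_DOUBLE, mpi_world, &packsize) != MPI_SUCCESS )
    return(-1);
  buffer = (char *)malloc((size_t)packsize);
  if ( buffer == NULL ) return(-2);

  /* Pack the doubles one by one into the contiguous byte buffer */
  for ( k = 0; k < Lun_v; k++ ) {
    if ( MPI_Pack(&given->v[k].o, 1, MPI_DOUBLE, buffer, packsize,
                  &position, mpi_world) != MPI_SUCCESS ) {
      free(buffer); return(-3);
    }
  }

  /* Send only the bytes actually packed; the receiver also uses MPI_PACKED */
  if ( MPI_Send(buffer, position, MPI_PACKED, dest, tag, mpi_world) != MPI_SUCCESS ) {
    free(buffer); return(-4);
  }
  free(buffer);
  return(0);
}

int recv_unpack_ncvect(heterogeneous *taken, int source, int tag)
{
  int packsize = 0, position = 0, k;
  char *buffer;
  MPI_Status status;

  if ( MPI_Pack_size(Lun_v, MPI_DOUBLE, mpi_world, &packsize) != MPI_SUCCESS )
    return(-1);
  buffer = (char *)malloc((size_t)packsize);
  if ( buffer == NULL ) return(-2);

  if ( MPI_Recv(buffer, packsize, MPI_PACKED, source, tag, mpi_world,
                &status) != MPI_SUCCESS ) {
    free(buffer); return(-3);
  }

  /* Unpack in the same order the sender packed */
  for ( k = 0; k < Lun_v; k++ ) {
    if ( MPI_Unpack(buffer, packsize, &position, &taken->v[k].o, 1, MPI_DOUBLE,
                    mpi_world) != MPI_SUCCESS ) {
      free(buffer); return(-4);
    }
  }
  free(buffer);
  return(0);
}
/*
 * Design note: unlike the vector datatype, these helpers copy all Lun_v
 * elements (no stride) through an intermediate pack buffer, trading an extra
 * copy for explicit control over the on-the-wire layout.
 */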