/******************************************************************/
/*                                                                */
/* manual_all2all_eng.c                                           */
/*                                                                */
/* Questions 3-5: Alltoall                                        */
/*                                                                */
/******************************************************************/
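/*
 * Build/run sketch (assuming an MPI installation that provides the usual
 * mpicc and mpirun wrappers; exact commands may differ on your system):
 *
 *   mpicc manual_all2all_eng.c -o manual_all2all
 *   mpirun -np 4 ./manual_all2all
 */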
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <mpi.h>
#include <sys/time.h>
#define BUFLEN 65536
/* Returns the current wall-clock time in milliseconds. */
long ms_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (tv.tv_sec * 1000 + tv.tv_usec / 1000);
}
/* Simulates a long computation by busy-waiting for 4 seconds. */
void long_computation(void)
{
  long now = ms_time();
  while (ms_time() < now + 4000) {
    /* busy wait */
  }
}
/*
 * Naive broadcast: the root sends the buffer to every other process with
 * point-to-point messages; the other processes receive it from the root.
 */
void my_broadcast(void *buffer, int count, MPI_Datatype datatype, int root) {
  int my_id, nb_proc;
  MPI_Status status;
  int i;
  MPI_Comm_rank(MPI_COMM_WORLD,&my_id);
  MPI_Comm_size(MPI_COMM_WORLD,&nb_proc);
  
  if (my_id == root) {
    for (i=0; i<nb_proc; i++) {
      if (i != root) {
        MPI_Send(buffer, count, datatype, i, 0, MPI_COMM_WORLD);
      }
    }
  } else {
    MPI_Recv(buffer,count,datatype, root, 0, MPI_COMM_WORLD, &status);
  }
}
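
/*
 * For reference, the built-in MPI equivalent of my_broadcast above is a
 * single collective call (sketch of the replacement):
 *
 *   MPI_Bcast(buffer, count, datatype, root, MPI_COMM_WORLD);
 */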
/*
 * We assume that the data to be sent is at buffer[my_rank],
 * and that the data from process i is received at buffer[i].
 * We also assume that the buffer contains integers and that count == 1
 * (for count > 1 the offsets below would have to be scaled by count).
 */
void my_alltoall(int *buffer, int count, MPI_Datatype datatype) {
  int root, nb_proc;
  
  MPI_Comm_size(MPI_COMM_WORLD,&nb_proc);
  for(root=0; root<nb_proc; root++) {
    my_broadcast(&(buffer[root]), count, datatype, root);
  }
}
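
/*
 * Note: since each process contributes one element and ends up with the
 * contribution of everyone, this loop of nb_proc broadcasts is what MPI
 * calls an allgather; the built-in equivalent used in Question 5 below
 * is MPI_Allgather.
 */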
void my_asynchronous_all2all(int *buffer, int count, MPI_Datatype datatype) {
  int my_id, nb_proc;
  int i;
  int nb_requests;
  MPI_Request *send_requests;
  MPI_Request *recv_requests;
  MPI_Status *status_array;
  MPI_Comm_rank(MPI_COMM_WORLD,&my_id);
  MPI_Comm_size(MPI_COMM_WORLD,&nb_proc);
  send_requests = (MPI_Request*) malloc((nb_proc - 1) * sizeof(MPI_Request));
  recv_requests = (MPI_Request*) malloc((nb_proc - 1) * sizeof(MPI_Request));
  status_array = (MPI_Status*) malloc((nb_proc - 1) * sizeof(MPI_Status));
  
  nb_requests = 0;
  for(i=0; i<nb_proc; i++) {
    if (i != my_id) { 
      /*
       * We post the nonblocking send to and the nonblocking receive
       * from process i.
       */
      MPI_Isend(&buffer[my_id], count, datatype, i, 1, MPI_COMM_WORLD, &send_requests[nb_requests]);
      MPI_Irecv(&buffer[i], count, datatype, i, 1, MPI_COMM_WORLD, &recv_requests[nb_requests]);
      nb_requests++;
    }
  }
 
  /* We wait for all send and receive requests to complete */
  MPI_Waitall(nb_requests, send_requests, status_array);
  MPI_Waitall(nb_requests, recv_requests, status_array);

  /* Release the request and status arrays */
  free(send_requests);
  free(recv_requests);
  free(status_array);
}
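
/*
 * Note: the two MPI_Waitall calls above could be merged by storing the
 * send and receive requests in a single (hypothetical) all_requests array
 * of 2*(nb_proc-1) entries and waiting once; MPI_STATUSES_IGNORE avoids
 * allocating a status array when the statuses are not inspected (sketch):
 *
 *   MPI_Waitall(2 * (nb_proc - 1), all_requests, MPI_STATUSES_IGNORE);
 */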
int main(int argc, char **argv) 
{
  int nb_proc;
  int my_id;
  int *buffer;
  long int start_time, end_time;
  int i;
  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD,&nb_proc);
  MPI_Comm_rank(MPI_COMM_WORLD,&my_id);
 
  buffer = (int*) malloc(nb_proc * sizeof(int));
  start_time = ms_time();
  /*
   * We first put our id in buffer[my_id], and set everything else to zero
   */
  for(i=0;i<nb_proc;i++) {
    buffer[i] = 0;
  }
  buffer[my_id] = my_id;
  /*
   * Everybody prints its buffer before the Alltoall
   */  
  printf("[proc P%d] Before all2all, my buffer is ",my_id);
  for(i=0;i<nb_proc;i++) {
    printf("%d ",buffer[i]);
  }
  printf("\n");
  /* Now we want to share this buffer */
  /* Question 3. Using synchronous communications */
  my_alltoall(buffer, 1, MPI_INT);
  /* Question 4. Using asynchronous communications */
  //  my_asynchronous_all2all(buffer, 1, MPI_INT);
  /* Question 5. Using built-in MPI functions */
  /* MPI_IN_PLACE is required here: the MPI standard forbids the send buffer
     from aliasing the receive buffer, and our contribution is already in
     place at buffer[my_id]. */
  //  MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, buffer, 1, MPI_INT, MPI_COMM_WORLD);
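  /*
   * Design note: although the exercise is called "all2all", every process
   * sends the same single value to everyone, so the matching built-in
   * collective is MPI_Allgather; MPI_Alltoall would require a distinct
   * value for each destination.
   */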
  
  /*
   * Everybody prints its buffer after the Alltoall
   */  
  printf("[proc P%d] After all2all, my buffer is ",my_id);
  for(i=0;i<nb_proc;i++) {
    printf("%d ",buffer[i]);
  }
  printf("\n");
  long_computation();
  end_time = ms_time();
  printf("[proc P%d] Done within %ld milliseconds.\n", my_id, end_time - start_time);
  free(buffer);
  MPI_Finalize();
  return (0);
}