
CSL-342: Parallel Programming Lab

Semester BSIT – 06
Student Name: Muhammad Faisal
Registration: 43865

Lab 08: Performing Parallel Rank with MPI

Exercises

Exercise 1

Write a program that implements the two primary functions discussed above and puts them together into a TMPI_Rank function.
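The main program in the listing below includes a header file tmpi_rank.h whose contents are not reproduced in the lab. A minimal sketch of that header (assumed here; only the TMPI_Rank prototype is actually visible from the calls in the code) would be:

// tmpi_rank.h - minimal sketch; only the prototype used by main is required
#ifndef TMPI_RANK_H
#define TMPI_RANK_H

#include <mpi.h>

// Computes the rank of *send_data among the values contributed by all
// processes in comm and stores the result (an int) in *recv_data.
int TMPI_Rank(void *send_data, void *recv_data, MPI_Datatype datatype, MPI_Comm comm);

#endif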

Code

// --- main program: each process ranks one random float with TMPI_Rank ---
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include "tmpi_rank.h"

int main(int argc, char** argv) {
  MPI_Init(NULL, NULL);

  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  int world_size;
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Seed the random number generator to get different results each time
  srand(time(NULL) * world_rank);
  float rand_num = rand() / (float)RAND_MAX;

  int rank;
  TMPI_Rank(&rand_num, &rank, MPI_FLOAT, MPI_COMM_WORLD);
  printf("Rank for %f on process %d - %d\n", rand_num, world_rank, rank);

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
}

// --- TMPI_Rank implementation and helper functions ---
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

// Pairs a process's communicator rank with the number it contributed
typedef struct {
  int comm_rank;
  union { float f; int i; } number;
} CommRankNumber;

// Gathers one number from every process to the root process
void *gather_numbers_to_root(void *number, MPI_Datatype datatype, MPI_Comm comm) {
  int comm_rank, comm_size;
  MPI_Comm_rank(comm, &comm_rank);
  MPI_Comm_size(comm, &comm_size);

  // Allocate a receive buffer on the root, sized by the MPI datatype in use
  int datatype_size;
  MPI_Type_size(datatype, &datatype_size);
  void *gathered_numbers = NULL;
  if (comm_rank == 0) {
    gathered_numbers = malloc(datatype_size * comm_size);
  }

  // Gather all of the numbers on the root process
  MPI_Gather(number, 1, datatype, gathered_numbers, 1, datatype, 0, comm);
  return gathered_numbers;
}

// qsort comparators for CommRankNumbers holding floats and ints
int compare_float_comm_rank_number(const void *a, const void *b) {
  CommRankNumber *comm_rank_number_a = (CommRankNumber *)a;
  CommRankNumber *comm_rank_number_b = (CommRankNumber *)b;
  if (comm_rank_number_a->number.f < comm_rank_number_b->number.f) { return -1; }
  else if (comm_rank_number_a->number.f > comm_rank_number_b->number.f) { return 1; }
  else { return 0; }
}

int compare_int_comm_rank_number(const void *a, const void *b) {
  CommRankNumber *comm_rank_number_a = (CommRankNumber *)a;
  CommRankNumber *comm_rank_number_b = (CommRankNumber *)b;
  if (comm_rank_number_a->number.i < comm_rank_number_b->number.i) { return -1; }
  else if (comm_rank_number_a->number.i > comm_rank_number_b->number.i) { return 1; }
  else { return 0; }
}

// Sorts the gathered numbers on the root and returns, per process, the rank of its number
int *get_ranks(void *gathered_numbers, int gathered_number_count, MPI_Datatype datatype) {
  int datatype_size;
  MPI_Type_size(datatype, &datatype_size);

  // Pair each gathered number with the rank of the process that owns it
  CommRankNumber *comm_rank_numbers = malloc(gathered_number_count * sizeof(CommRankNumber));
  int i;
  for (i = 0; i < gathered_number_count; i++) {
    comm_rank_numbers[i].comm_rank = i;
    memcpy(&(comm_rank_numbers[i].number), gathered_numbers + (i * datatype_size),
           datatype_size);
  }

  // Sort with the comparator that matches the datatype
  if (datatype == MPI_FLOAT) {
    qsort(comm_rank_numbers, gathered_number_count, sizeof(CommRankNumber),
          &compare_float_comm_rank_number);
  } else {
    qsort(comm_rank_numbers, gathered_number_count, sizeof(CommRankNumber),
          &compare_int_comm_rank_number);
  }

  // The sorted position of each number is its rank; store it at the owner's index
  int *ranks = (int *)malloc(sizeof(int) * gathered_number_count);
  for (i = 0; i < gathered_number_count; i++) {
    ranks[comm_rank_numbers[i].comm_rank] = i;
  }

  // Clean up and return the rank array
  free(comm_rank_numbers);
  return ranks;
}

// Gets the rank of send_data (of type datatype) across comm and returns it in recv_data
int TMPI_Rank(void *send_data, void *recv_data, MPI_Datatype datatype, MPI_Comm comm) {
  // Check base cases first - only MPI_INT and MPI_FLOAT are supported
  if (datatype != MPI_INT && datatype != MPI_FLOAT) {
    return MPI_ERR_TYPE;
  }

  int comm_size, comm_rank;
  MPI_Comm_size(comm, &comm_size);
  MPI_Comm_rank(comm, &comm_rank);

  // Gather the numbers to the root, compute the ranks there, then scatter each
  // process its own rank
  void *gathered_numbers = gather_numbers_to_root(send_data, datatype, comm);
  int *ranks = NULL;
  if (comm_rank == 0) {
    ranks = get_ranks(gathered_numbers, comm_size, datatype);
  }
  MPI_Scatter(ranks, 1, MPI_INT, recv_data, 1, MPI_INT, 0, comm);

  // Clean up
  if (comm_rank == 0) {
    free(gathered_numbers);
    free(ranks);
  }
  return MPI_SUCCESS;
}
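Assuming the two listings are saved as main.c and tmpi_rank.c (the file names are not given in the lab, so these are only placeholders), the program can be compiled and launched with the standard MPI tools, for example:

mpicc main.c tmpi_rank.c -o parallel_rank
mpirun -n 4 ./parallel_rank

Each process then prints one line of the form "Rank for <value> on process <p> - <r>"; the values, and therefore the printed ranks, differ from run to run because the generator is seeded with time(NULL) * world_rank.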

Output:
