
DIGITAL LAB ASSIGNMENT-4

PARALLEL AND DISTRIBUTED COMPUTING

Name - Ranuj Pradhan

Reg. No. - 20BCE2459
1. CRISTIAN'S ALGORITHM
Cristian's algorithm is a time synchronization protocol
used in distributed systems. It is designed to
synchronize the clocks of different nodes in a network to
a common time reference. The algorithm was proposed by
Flaviu Cristian in 1989 and is named after him.

The basic idea behind Cristian's algorithm is that a
client node sends a request for the current time to a
server node, which responds with its current time. The
client measures the round-trip time (RTT) of this
exchange and sets its clock to the server's reported time
plus RTT/2, on the assumption that the request and the
response each took roughly half the round trip.
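
As a minimal sketch of this client-side step (the network exchange is only simulated here, with a sleep standing in for transmission delay and a fixed value standing in for the server's clock, both hypothetical):

#include <chrono>
#include <iostream>
#include <thread>

// Stand-in for the real network exchange: sleep to simulate delay
// and return a fixed "server clock" reading (in seconds).
double request_server_time() {
    std::this_thread::sleep_for(std::chrono::milliseconds(40));
    return 1000.0;
}

int main() {
    using clock = std::chrono::steady_clock;

    auto t0 = clock::now();                    // request sent
    double server_time = request_server_time();
    auto t1 = clock::now();                    // response received

    // Round-trip time in seconds.
    double rtt = std::chrono::duration<double>(t1 - t0).count();

    // Cristian's estimate: the reply took about half the round trip,
    // so the server's clock has advanced by roughly rtt / 2 since it
    // was read.
    double synced_time = server_time + rtt / 2.0;

    std::cout << "RTT = " << rtt << " s, synced time = "
              << synced_time << std::endl;
    return 0;
}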

In a parallel and distributed computing environment,
Cristian's algorithm can be implemented using various
techniques such as message passing, shared memory, or
remote procedure calls.

One way to parallelize Cristian's algorithm is to divide
the network into multiple partitions and assign a server
node to each partition. Each client node in a partition
sends its request to the server of that partition, which
responds with its current time; the client then computes
the round-trip time and adjusts its clock as above. This
approach reduces the overall response time because each
server handles requests from only a fraction of the
nodes.
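
A minimal sketch of the partitioning idea, assuming clients are numbered consecutively and each partition's server is just a hypothetical name looked up by partition index:

#include <iostream>
#include <string>
#include <vector>

int main() {
    const int num_clients = 12;
    const int num_partitions = 4;
    const int clients_per_partition = num_clients / num_partitions;

    // Hypothetical per-partition time servers.
    std::vector<std::string> servers = {"srv0", "srv1", "srv2", "srv3"};

    for (int client = 0; client < num_clients; client++) {
        // Static mapping: a client talks only to the server of its
        // own partition, so each server sees 1/num_partitions of
        // the synchronization traffic.
        int partition = client / clients_per_partition;
        std::cout << "client " << client << " -> "
                  << servers[partition] << std::endl;
    }
    return 0;
}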

Another way to distribute the algorithm is to use a
distributed hash table (DHT) to spread the time
synchronization requests across multiple nodes. In this
approach, each node publishes its own clock value in the
DHT, and a client retrieves the clock values of several
nodes and averages them to obtain its time estimate.
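
A minimal sketch of the averaging step, assuming a hypothetical dht_get() that returns the clock value a given node has published (simulated here with fixed values):

#include <iostream>
#include <vector>

// Hypothetical DHT lookup: returns the clock value published by the
// node with the given id. Simulated with fixed sample values.
double dht_get(int node_id) {
    static const double published[] = {100.2, 99.8, 100.1, 100.4};
    return published[node_id % 4];
}

int main() {
    std::vector<int> sampled_nodes = {0, 1, 2, 3};

    // Average the clock values retrieved from several nodes.
    double sum = 0.0;
    for (int id : sampled_nodes) {
        sum += dht_get(id);
    }
    double estimate = sum / sampled_nodes.size();

    std::cout << "Averaged time estimate: " << estimate << std::endl;
    return 0;
}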

In summary, Cristian's algorithm can be parallelized and
distributed in several ways, and the choice of technique
depends on the characteristics of the computing
environment and the specific requirements of the
application.

CODE-
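The program below is a single-process OpenMP simulation of the algorithm rather than a real networked client: each loop iteration plays the role of one request/response exchange, with a random sleep standing in for network delay.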
#include <iostream>
#include <chrono>
#include <thread>
#include <cstdlib>
#include <omp.h>

using namespace std;

int main() {
    const int num_servers = 4;
    double server_times[num_servers] = {10.0, 15.0, 12.0, 9.0};
    double adjusted_times[num_servers];
    double total_time = 0.0;

    #pragma omp parallel for reduction(+:total_time)
    for (int i = 0; i < num_servers; i++) {
        // Record when the "request" is sent.
        double request_sent = omp_get_wtime();

        // Simulate network delay with a random sleep
        // (rand() is not thread-safe, but suffices for this demo).
        int sleep_time = rand() % 1000;
        this_thread::sleep_for(chrono::milliseconds(sleep_time));

        // The "response" arrives; measure the round-trip time.
        double round_trip_time = omp_get_wtime() - request_sent;

        // Cristian's adjustment: server time plus half the RTT.
        adjusted_times[i] = server_times[i] + round_trip_time / 2;
        total_time += adjusted_times[i];
    }

    double avg_time = total_time / num_servers;

    cout << "Server times: ";
    for (int i = 0; i < num_servers; i++) {
        cout << server_times[i] << " ";
    }
    cout << endl;

    cout << "Adjusted times: ";
    for (int i = 0; i < num_servers; i++) {
        cout << adjusted_times[i] << " ";
    }
    cout << endl;

    cout << "Average time: " << avg_time << endl;
    return 0;
}

Output-

2. Berkeley Algorithm

The Berkeley algorithm is a time synchronization
algorithm that uses a master clock and multiple slave
clocks. The master polls the slaves for their clock
values, computes the average of the differences between
its own clock and each slave's clock, and then sends
each slave the offset by which it should adjust its
clock, so that all clocks converge to a common time. The
algorithm can be parallelized and distributed to improve
its performance and scalability.
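Code-
The program below is a minimal single-machine sketch, simulating the algorithm with OpenMP threads in place of networked machines: thread 0 acts as the master, the other threads are slaves with drifted clocks, and shared arrays stand in for the poll and adjustment messages.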
#include <iostream>
#include <omp.h>

using namespace std;

int main() {
    const int num_threads = 4;
    double clocks[num_threads];   // simulated clock of each node
    double offsets[num_threads];  // adjustment computed by the master

    // Fixed per-node drift, so each node starts out of sync.
    const double drift[num_threads] = {0.0, 3.0, -2.0, 5.0};

    omp_set_num_threads(num_threads);

    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        int n = omp_get_num_threads();

        // Each node starts with a drifted clock value.
        clocks[id] = 100.0 + drift[id];
        #pragma omp barrier

        // Master (thread 0) polls all clocks and computes the
        // average difference from its own clock.
        if (id == 0) {
            double sum_diff = 0.0;
            for (int i = 0; i < n; i++) {
                sum_diff += clocks[i] - clocks[0];
            }
            double avg_diff = sum_diff / n;

            // Target time = master clock + average difference.
            // Each node's offset is whatever moves it to the target.
            for (int i = 0; i < n; i++) {
                offsets[i] = (clocks[0] + avg_diff) - clocks[i];
            }
        }
        #pragma omp barrier

        // Every node (master included) applies its offset.
        clocks[id] += offsets[id];

        #pragma omp critical
        cout << "Node " << id << " adjusted by " << offsets[id]
             << ", clock is now " << clocks[id] << endl;
    }

    return 0;
}
Output-

3. Sender-Initiated Load Balancing Algorithm

The sender-initiated load balancing algorithm is used in
distributed systems to balance the workload among multiple
processes. In this algorithm, the sender process determines the
workload of each process and pushes tasks to processes with
lower workload.
Here is a high-level description of the sender-initiated load
balancing algorithm:

1. The sender process collects the current workload of each process.
2. The sender process determines the task to be assigned.
3. The sender process selects the process with the lowest workload.
4. The sender process sends the task to the selected process.
5. The selected process performs the task.
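
The MPI program below is a simplified simulation of this scheme: rank 0 acts as the sender, keeping a simulated per-worker workload counter and dispatching each task to the currently least-loaded worker (ranks 1 and above); each worker is first told how many tasks to expect and then receives them.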

Code-
#include <iostream>
#include <vector>
#include <cstdlib>
#include <mpi.h>

using namespace std;

int main(int argc, char** argv) {
    int size, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size < 2) {
        if (rank == 0) cout << "Run with at least 2 processes." << endl;
        MPI_Finalize();
        return 0;
    }

    const int task_count = 100;

    if (rank == 0) {
        // Sender: simulated initial workload of each worker
        // (workers are ranks 1..size-1; rank 0 only assigns).
        vector<int> workload(size, 0);
        for (int i = 1; i < size; i++) {
            workload[i] = rand() % 10;
        }

        // Decide the destination of every task: always the worker
        // with the lowest current workload.
        vector<int> dest_of(task_count);
        vector<int> tasks_for(size, 0);
        for (int i = 0; i < task_count; i++) {
            int dest = 1;
            for (int j = 2; j < size; j++) {
                if (workload[j] < workload[dest]) dest = j;
            }
            dest_of[i] = dest;
            tasks_for[dest]++;
            workload[dest]++;  // sending a task raises its load
        }

        // Tell each worker how many tasks it will receive...
        for (int j = 1; j < size; j++) {
            MPI_Send(&tasks_for[j], 1, MPI_INT, j, 0, MPI_COMM_WORLD);
        }
        // ...then send the tasks themselves.
        for (int i = 0; i < task_count; i++) {
            MPI_Send(&i, 1, MPI_INT, dest_of[i], 1, MPI_COMM_WORLD);
        }
    }
    // receiver processes
    else {
        // Worker: learn how many tasks to expect, then receive them.
        int my_count;
        MPI_Recv(&my_count, 1, MPI_INT, 0, 0, MPI_COMM_WORLD,
                 MPI_STATUS_IGNORE);
        for (int i = 0; i < my_count; i++) {
            int task;
            MPI_Recv(&task, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
            cout << "Process " << rank << " received task " << task
                 << endl;
        }
    }

    MPI_Finalize();
    return 0;
}


Output-
