Question
TASK#3 (1.5 Mark): Run the code with various options (critical, atomic, reduction) for solving the race condition, and observe both the result and the execution time.
parallel code:
import numpy as np
import mnist
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from tensorflow.keras.utils import to_categorical
import multiprocessing
def run_model(train_images, train_labels, test_images, test_labels, process_num):
    """Train and evaluate a small MNIST CNN inside one worker process.

    Args:
        train_images: training image chunk assigned to this worker.
        train_labels: labels matching ``train_images``.
        test_images: full test image set (shared by all workers).
        test_labels: labels matching ``test_images``.
        process_num: worker index; also selects the GPU device string.

    Side effects: trains a fresh model and prints predictions vs. ground
    truth for the first 5 test images, tagged with the worker index.
    """
    # Pin every op in this scope to the GPU matching the worker index.
    # NOTE(review): assumes at least process_num+1 GPUs exist — confirm.
    with tf.device(f'/GPU:{process_num}'):
        # Scale raw pixel values from [0, 255] down to [-0.5, 0.5].
        train_images = (train_images / 255) - 0.5
        test_images = (test_images / 255) - 0.5

        # Append a channel axis: (N, 28, 28) -> (N, 28, 28, 1).
        train_images = np.expand_dims(train_images, axis=3)
        test_images = np.expand_dims(test_images, axis=3)

        n_filters = 8
        kernel_size = 3
        pooling = 2

        # One conv layer, one max-pool, then a 10-way softmax classifier.
        model = Sequential([
            Conv2D(n_filters, kernel_size, input_shape=(28, 28, 1)),
            MaxPooling2D(pool_size=pooling),
            Flatten(),
            Dense(10, activation='softmax'),
        ])

        # Adam optimizer with one-hot cross-entropy loss.
        model.compile(
            'adam', loss='categorical_crossentropy', metrics=['accuracy'],
        )

        # Train for 3 epochs, validating against the shared test split.
        model.fit(
            train_images, to_categorical(train_labels),
            epochs=3,
            validation_data=(test_images, to_categorical(test_labels)),
        )

        # Sanity check: predict the first 5 test digits.
        predictions = model.predict(test_images[:5])
        print("Process:", process_num, "Predictions:", np.argmax(predictions, axis=1))
        print("Process:", process_num, "Ground truth:", test_labels[:5])
if __name__ == '__main__':
    # Load the full MNIST dataset once in the parent process; each child
    # receives its slice through the Process args (copied on spawn/fork).
    # FIX: removed the unused `multiprocessing.Manager()` context — the
    # manager object was never referenced, so it only spawned a useless
    # manager server process and added an extra indentation level.
    train_images = mnist.train_images()
    train_labels = mnist.train_labels()
    test_images = mnist.test_images()
    test_labels = mnist.test_labels()

    # Partition the training set into 4 equal contiguous chunks, one per
    # worker. NOTE: any remainder rows (len % 4) are silently dropped.
    chunk_size = train_images.shape[0] // 4

    process_list = []
    for i in range(4):
        start = i * chunk_size
        end = start + chunk_size
        process = multiprocessing.Process(
            target=run_model,
            args=(
                train_images[start:end],
                train_labels[start:end],
                test_images,
                test_labels,
                i,
            )
        )
        process.start()
        process_list.append(process)

    # Block until every worker has finished training and reporting.
    for process in process_list:
        process.join()
Step by Step Solution
There are 3 steps involved in solving it.
Step: 1
Get Instant Access to Expert-Tailored Solutions
See step-by-step solutions with expert insights and AI powered tools for academic success
Step: 2
Step: 3
Ace Your Homework with AI
Get the answers you need in no time with our AI-driven, step-by-step assistance
Get Started