Question


Need a detailed analysis of what this code/model is doing. Thanks

#Step 1: Import the required Python libraries:

import numpy as np
import matplotlib.pyplot as plt

import keras
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import LeakyReLU
from keras.layers import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.datasets import cifar10
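Note: the imports above assume the standalone keras package (Keras 2.x). On a recent TensorFlow install, where Keras ships as tf.keras, the equivalent imports would look roughly like this sketch (an assumption about the target environment, not part of the original code):

import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.layers import (Input, Dense, Reshape, Flatten, Dropout,
                                     BatchNormalization, Activation, ZeroPadding2D,
                                     LeakyReLU, UpSampling2D, Conv2D)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.datasets import cifar10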

#Step 2: Load the data.

#Loading the CIFAR10 training images and labels
(X, y), (_, _) = keras.datasets.cifar10.load_data()

#Selecting a single class of images (label 8 = "ship")
#The class was chosen arbitrarily; any label
#between 0 and 9 can be used
X = X[y.flatten() == 8]
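For reference, CIFAR-10 provides 50,000 training images with 5,000 images per class, and label 8 corresponds to the "ship" class, so X now holds 5,000 ship images of shape 32x32x3. An optional sanity check (illustrative, not part of the original code):

#Optional check of the filtered data (illustrative only)
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
print(X.shape)          #expected: (5000, 32, 32, 3)
print(class_names[8])   #'ship'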

#Step 3: Define parameters to be used in later processes.

#Defining the input shape and the size of the noise (latent) vector
image_shape = (32, 32, 3)
latent_dimensions = 100

#Step 4: Define a utility function to build the generator.

def build_generator():

    model = Sequential()

    #Building the input layer: project the noise vector to an 8x8x128 tensor
    model.add(Dense(128 * 8 * 8, activation="relu",
                    input_dim=latent_dimensions))
    model.add(Reshape((8, 8, 128)))

    #Upsampling 8x8 -> 16x16
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))

    #Upsampling 16x16 -> 32x32
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))

    #Final RGB image with tanh activation, so pixel values lie in [-1, 1]
    model.add(Conv2D(3, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    #Generating the output image from an input noise vector
    noise = Input(shape=(latent_dimensions,))
    image = model(noise)

    return Model(noise, image)
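The generator maps a 100-dimensional noise vector to a 32x32x3 image: the Dense layer projects the noise to 8*8*128 units, Reshape turns it into an 8x8x128 feature map, and the two UpSampling2D layers double the spatial size to 16x16 and then 32x32 before the final Conv2D with tanh produces 3 channels in [-1, 1]. An optional shape check (illustrative, not part of the original code):

#Optional shape check of the generator (illustrative only)
g = build_generator()
g.summary()   #the last layer should report output shape (None, 32, 32, 3)
sample = g.predict(np.random.normal(0, 1, (1, latent_dimensions)))
print(sample.shape, sample.min(), sample.max())   #(1, 32, 32, 3), values inside [-1, 1]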

#Step 5: Define a utility function to build the discriminator.

def build_discriminator():

    #Building the convolutional layers
    #to classify whether an image is real or fake
    model = Sequential()

    model.add(Conv2D(32, kernel_size=3, strides=2,
                     input_shape=image_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))

    #Building the output layer: a single sigmoid unit giving P(real)
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    image = Input(shape=image_shape)
    validity = model(image)

    return Model(image, validity)
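The discriminator shrinks the 32x32x3 input with strided convolutions: 32x32 -> 16x16 -> 8x8, then ZeroPadding2D pads to 9x9 so the next stride-2 convolution gives 5x5, and the final stride-1 convolution keeps 5x5 with 256 filters, i.e. 6,400 features after Flatten, feeding a single sigmoid unit. An optional check (illustrative, not part of the original code):

#Optional shape check of the discriminator (illustrative only)
d = build_discriminator()
d.summary()   #Flatten should report 6400 = 5 * 5 * 256 features; output shape (None, 1)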

#Step 6: Define a utility function to display the generated images.

def display_images():

    #Generate a batch of random noise vectors
    noise = np.random.normal(0, 1, (16, latent_dimensions))

    #Generate images from the noise
    generated_images = generator.predict(noise)

    #Rescale the images from [-1, 1] to the 0-1 range
    generated_images = 0.5 * generated_images + 0.5

    #Plot the generated images in a 4x4 grid
    plt.figure(figsize=(6, 6))
    for i in range(generated_images.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(generated_images[i])
        plt.axis('off')
    plt.tight_layout()
    plt.show()
    plt.close()

#Step 7: Build the GAN.

#Building and compiling the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(0.0002, 0.5),
                      metrics=['accuracy'])

#Making the discriminator untrainable inside the combined model,
#so that only the generator's weights are updated when the combined
#network is trained (the discriminator itself still learns when
#train_on_batch is called on it directly, because it was compiled
#before this flag was changed)
discriminator.trainable = False

#Building the generator
generator = build_generator()

#Defining the input for the generator
#and generating the images
z = Input(shape=(latent_dimensions,))
image = generator(z)

#Checking the validity of the generated image
valid = discriminator(image)

#Defining the combined model of the generator and the discriminator
combined_network = Model(z, valid)
combined_network.compile(loss='binary_crossentropy',
                         optimizer=Adam(0.0002, 0.5))
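A subtle but important detail: because the discriminator is compiled before discriminator.trainable is set to False, it still updates its own weights when train_on_batch is called on it directly, while combined_network (compiled after the flag change) treats it as frozen, so only the generator learns through the combined model. A small sanity check of this behaviour (illustrative, assuming standard Keras 2.x semantics where the trainable flag is captured at compile time):

#Optional check: the combined model should expose only the generator's trainable weights
print(len(generator.trainable_weights))
print(len(combined_network.trainable_weights))   #expected to match the generator's count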

#Step 8: Train the network.

num_epochs = 15000
batch_size = 32
display_interval = 1000
losses = []

#Normalizing the input from [0, 255] to [-1, 1]
#(0 -> -1, 127.5 -> 0, 255 -> 1), matching the generator's tanh output range
X = (X / 127.5) - 1.

#Defining the adversarial ground truths, with some label noise:
#real labels lie slightly above 1 and fake labels slightly above 0
valid = np.ones((batch_size, 1))
valid += 0.05 * np.random.random(valid.shape)
fake = np.zeros((batch_size, 1))
fake += 0.05 * np.random.random(fake.shape)

for epoch in range(num_epochs):

    #Training the Discriminator
    #Sampling a random batch of real images
    index = np.random.randint(0, X.shape[0], batch_size)
    images = X[index]

    #Sampling noise and generating a batch of fake images
    noise = np.random.normal(0, 1, (batch_size, latent_dimensions))
    generated_images = generator.predict(noise)

    #Training the discriminator to distinguish
    #real images from generated ones
    discm_loss_real = discriminator.train_on_batch(images, valid)
    discm_loss_fake = discriminator.train_on_batch(generated_images, fake)
    discm_loss = 0.5 * np.add(discm_loss_real, discm_loss_fake)

    #Training the Generator
    #Sampling fresh noise for the generator update
    noise = np.random.normal(0, 1, (batch_size, latent_dimensions))
    misleading_targets = np.ones((batch_size, 1))

    #Training the generator (through the combined network) to produce
    #images that the discriminator classifies as real
    genr_loss = combined_network.train_on_batch(noise, misleading_targets)

    #Display and save generated images every 1000th epoch
    #to track the progress
    if epoch % display_interval == 0:
        display_images()
        noise = np.random.normal(0, 1, (16, latent_dimensions))
        generated_images = generator.predict(noise)
        generated_images = 0.5 * generated_images + 0.5
        plt.figure(figsize=(6, 6))
        for i in range(generated_images.shape[0]):
            plt.subplot(4, 4, i + 1)
            plt.imshow(generated_images[i])
            plt.axis('off')
        plt.tight_layout()
        plt.savefig(f"epoch_{epoch+1}.png")
        plt.close()

    #Printing the progress and losses of the current epoch
    print(f"Epoch {epoch} Discriminator Loss: {discm_loss[0]} Generator Loss: {genr_loss}")
