Question

I use this model to train a CNN to classify digits in the MNIST dataset and calculate the test accuracy:

import torch
import torch.nn as nn
from torchvision import datasets, transforms

# Load the datasets
train_data = datasets.MNIST(
    root='data',
    train=True,
    transform=transforms.Compose([transforms.ToTensor()]),
    download=True
)
test_data = datasets.MNIST(
    root='data',
    train=False,
    transform=transforms.Compose([transforms.ToTensor()]),
    download=True
)

# Wrap the datasets in DataLoaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=100, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=100, num_workers=0)
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # flatten the output of conv2 to (batch_size, 32*7*7)
        x = x.view(x.size(0), -1)
        output = self.out(x)
        return output

# Instantiate the model, define the loss function and the optimizer
cnn = CNN()
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.01)

# Training the model
num_epochs = 10
# Make sure cnn is in training mode
cnn.train()
# Setting up the empty errors list
errors = []
# How many batches are there in our training set?
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        output = cnn(images)                    # Forward pass: compute predicted y by passing x to the model
        curr_error = loss_func(output, labels)  # Compute loss
        errors.append(curr_error.item())        # Append the current error to the errors list
        optimizer.zero_grad()                   # Clear gradients for this training step
        curr_error.backward()                   # Backpropagation: compute gradients
        optimizer.step()                        # Apply gradients (update weights)
        # Print training status every 100 batches
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, curr_error.item()))

# Put the model in evaluation mode and compute the test accuracy
cnn.eval()
num_correct = 0
for images, labels in test_loader:
    test_output = cnn(images)
    pred_y = torch.max(test_output, 1)[1].data.squeeze()
    num_correct += (pred_y == labels).sum().item()      # Count the correct predictions
    idx_mask = ((pred_y == labels) == False).nonzero()  # Indices of misclassified images in this batch
accuracy = num_correct / len(test_data)
print('Test Accuracy of the model on the MNIST test images: %.2f' % accuracy)

Now I want to create a new version of the dataset in which the images have been smoothed/filtered by applying a median filter twice, then use the same model to calculate a new test accuracy. I also want to create one more version of the dataset where the images are smoothed in the same way and then sharpened, and again use the same model to calculate a new test accuracy. (Hint: Use transforms!)
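A minimal sketch of one way to set this up, assuming the filtering is applied to the PIL images that datasets.MNIST returns before ToTensor(), using PIL's ImageFilter inside transforms.Lambda. The 3x3 median window, the choice of ImageFilter.SHARPEN for the sharpening step, and the evaluate helper below are illustrative assumptions, not anything fixed by the question:

from PIL import ImageFilter

# Median filter applied twice (3x3 window assumed; the question does not specify a size)
median_twice = transforms.Compose([
    transforms.Lambda(lambda img: img.filter(ImageFilter.MedianFilter(size=3))),
    transforms.Lambda(lambda img: img.filter(ImageFilter.MedianFilter(size=3))),
    transforms.ToTensor(),
])

# The same smoothing followed by sharpening (PIL's built-in SHARPEN kernel is an assumed choice)
median_then_sharpen = transforms.Compose([
    transforms.Lambda(lambda img: img.filter(ImageFilter.MedianFilter(size=3))),
    transforms.Lambda(lambda img: img.filter(ImageFilter.MedianFilter(size=3))),
    transforms.Lambda(lambda img: img.filter(ImageFilter.SHARPEN)),
    transforms.ToTensor(),
])

# New versions of the test set; the downloaded MNIST files are reused, only the transform changes
test_data_smoothed = datasets.MNIST(root='data', train=False, transform=median_twice, download=True)
test_data_sharpened = datasets.MNIST(root='data', train=False, transform=median_then_sharpen, download=True)

# Hypothetical helper that reuses the already-trained cnn with the same evaluation logic as above
def evaluate(model, dataset):
    loader = torch.utils.data.DataLoader(dataset, batch_size=100, num_workers=0)
    model.eval()
    correct = 0
    with torch.no_grad():
        for images, labels in loader:
            pred_y = torch.max(model(images), 1)[1]
            correct += (pred_y == labels).sum().item()
    return correct / len(dataset)

print('Test accuracy on median-filtered images: %.2f' % evaluate(cnn, test_data_smoothed))
print('Test accuracy on median-filtered + sharpened images: %.2f' % evaluate(cnn, test_data_sharpened))

The trained cnn is used unchanged for all three accuracy numbers; only the test-time transform differs, which is what the hint about transforms points at.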
