
Question



Develop a back propagation neural network to solve the regression problem: f(x) = 1/x. Be sure to create a testing and training set.

My Python code below does not accurately model the target function f(x) = 1/x, but I can't figure out why the low-x inputs are not learned properly.
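For context, one plausible contributing factor (an observation, not a verified diagnosis): f(x) = 1/x is extremely steep near x = 0.1, and with uniformly spaced samples only a small fraction of the training set covers that region, while min-max normalization squeezes every target for x >= 1 into the bottom ~9% of [0, 1]. A quick coverage check:

    import numpy as np
    x = np.linspace(0.1, 10, 301)
    print(np.sum(x < 1.0), len(x))  # 28 of 301 samples cover the steep part, y in (1, 10]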

import numpy as np
import matplotlib.pyplot as plt
from random import randrange
from random import random

class MultilayerNnRegressor:
    # vectorized denormalization of target values
    def denormalize_target(self, target, target_minmax):
        for i in range(len(target)):
            target[i] = (target[i] * (target_minmax[1] - target_minmax[0])) + target_minmax[0]

    # denormalize an individual target value
    def rescale_target(self, target_value, target_minmax):
        return (target_value * (target_minmax[1] - target_minmax[0])) + target_minmax[0]

    # Set up weights for each layer in the network
    def initialize_network(self, n_inputs, n_hidden, n_hidden2, n_outputs):
        network = list()
        # the first hidden layer has 'n_hidden' neurons, each with 'n_inputs' input weights plus a bias
        hidden_layer = [{'weights': [random() * 0.1 for i in range(n_inputs + 1)]} for i in range(n_hidden)]
        network.append(hidden_layer)
        if n_hidden2 != 0:
            hidden_layer2 = [{'weights': [random() * 0.01 for i in range(n_hidden + 1)]} for i in range(n_hidden2)]
            network.append(hidden_layer2)
            output_layer = [{'weights': [random() * 0.01 for i in range(n_hidden2 + 1)]} for i in range(n_outputs)]
            network.append(output_layer)
        else:
            output_layer = [{'weights': [random() * 0.01 for i in range(n_hidden + 1)]} for i in range(n_outputs)]
            network.append(output_layer)
        return network

    # Neuron activation: weighted sum of inputs plus bias
    def activate(self, weights, inputs):
        activation = weights[-1]  # bias is stored as the last weight
        for i in range(len(weights) - 1):
            activation += weights[i] * inputs[i]
        return activation

    # sigmoid transfer function
    def transfer(self, activation):
        return 1.0 / (1.0 + np.exp(-activation))

    # sigmoid derivative for backpropagation of errors
    def transfer_derivative(self, output):
        return output * (1.0 - output)
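    # Quick sanity check of the transfer pair (note the derivative is written in
    # terms of the neuron's OUTPUT, not its activation):
    #   transfer(0.0) -> 0.5, and transfer_derivative(0.5) -> 0.5 * (1 - 0.5) = 0.25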

    def forward_propagate(self, network, row):
        inputs = row
        for layer in network:
            new_inputs = []
            for neuron in layer:
                neuron['activation'] = self.activate(neuron['weights'], inputs)
                neuron['output'] = self.transfer(neuron['activation'])
                new_inputs.append(neuron['output'])
            inputs = new_inputs
        return inputs[0]

    # Back-propagate the error for one training sample
    def backward_propagate_error(self, network, expected):
        for i in reversed(range(len(network))):
            layer = network[i]
            errors = list()
            if i != len(network) - 1:
                for j in range(len(layer)):
                    error = 0.0
                    for neuron in network[i + 1]:
                        error += (neuron['weights'][j] * neuron['delta'])
                    errors.append(error)
            else:
                for j in range(len(layer)):
                    neuron = layer[j]
                    errors.append(expected - neuron['output'])
            for j in range(len(layer)):
                neuron = layer[j]
                neuron['delta'] = errors[j] * self.transfer_derivative(neuron['output'])
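    # For reference, backward_propagate_error implements the standard delta rule
    # for sigmoid units, where y_hat is the (single) output neuron's output:
    #   delta_out   = (y - y_hat) * y_hat * (1 - y_hat)
    #   delta_hid_j = (sum_k w_jk * delta_k) * o_j * (1 - o_j)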

    # Update weights for each data sample during training
    def update_weights(self, network, row, l_rate):
        for i in range(len(network)):
            inputs = row[:-1]
            if i != 0:
                inputs = [neuron['output'] for neuron in network[i - 1]]
            for neuron in network[i]:
                for j in range(len(inputs)):
                    neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
                neuron['weights'][-1] += l_rate * neuron['delta'] * 1

    # Train the network over each sample in the training set
    def train_network(self, network, train, l_rate, n_epoch, n_outputs, target_minmax):
        for epoch in range(n_epoch + 1):
            sum_error = 0
            for row in train:
                # forward propagate; returns a single output value
                output = self.forward_propagate(network, row)
                # calculate the loss on the denormalized scale
                output_rescaled = self.rescale_target(output, target_minmax)
                expected = row[len(row) - 1]
                expected_rescaled = self.rescale_target(expected, target_minmax)
                sum_error += (expected_rescaled - output_rescaled) * (expected_rescaled - output_rescaled)
                # back-propagate the error and update the weights
                self.backward_propagate_error(network, expected)
                self.update_weights(network, row, l_rate)
            if epoch % 100 == 0:
                print('>epoch=%d, lrate=%.3f, error(MSE)=%.3f, error(RMSE)=%.3f'
                      % (epoch, l_rate, sum_error / float(len(train)),
                         np.sqrt(sum_error / float(len(train)))))

    # Core function: build the network, train it, and predict on the test set
    def back_propagation(self, train, test, l_rate, n_epoch, n_hidden, n_hidden2, target_minmax):
        n_inputs = len(train[0]) - 1
        # initialize the layers
        network = self.initialize_network(n_inputs, n_hidden, n_hidden2, 1)
        # train over the training set to update the weights
        self.train_network(network, train, l_rate, n_epoch, 1, target_minmax)
        predictions = list()
        # run forward propagation over each test sample to get predictions
        for row in test:
            prediction = self.forward_propagate(network, row)
            predictions.append(prediction)
        return np.array(predictions)
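# Minimal usage sketch for the class above (hypothetical toy data, already
# normalized to [0, 1]; each row is [x, y]):
#   nn = MultilayerNnRegressor()
#   toy = [[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]]
#   preds = nn.back_propagation(toy, toy, 0.25, 100, 4, 0, [0.0, 1.0])
#   preds.shape  # -> (3,)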

# Helper to remove a numpy array from a list; plain list.remove() raises an
# error on arrays, which breaks cross-validation
def removearray(L, arr):
    ind = 0
    size = len(L)
    while ind != size and not np.array_equal(L[ind], arr):
        ind += 1
    if ind != size:
        L.pop(ind)
    else:
        raise ValueError('array not found in list.')

# Determine min/max values for each column
def dataset_minmax(dataset):
    stats = [[min(column), max(column)] for column in zip(*dataset)]
    return stats

# Normalize the entire dataset column-wise to [0, 1]
def normalize_dataset_regression(dataset, minmax):
    for row in dataset:
        for i in range(len(row)):
            row[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])

# Denormalize the entire dataset back to the original scale
def denormalize_dataset_regression(dataset, minmax):
    for row in dataset:
        for i in range(len(row)):
            row[i] = (row[i] * (minmax[i][1] - minmax[i][0])) + minmax[i][0]
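# Worked example of the min-max scaling above, using this problem's ranges:
# with target min/max [0.1, 10.0], y = 1/5.05 ~= 0.198 maps to
# (0.198 - 0.1) / 9.9 ~= 0.0099, so every target for x >= 1 lands in the
# bottom ~9% of the normalized range, while the steep low-x targets span the rest.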

####################### PARAMETERS ###############
n_folds = 5
l_rate = 0.25
n_epoch = 1000
n_hidden = 10
n_hidden2 = 0

# Generate the dataset
trainDataMatrix = np.array([np.linspace(0.1, 10, 301), 1.0 / np.linspace(0.1, 10, 301)]).T
testDataMatrix = np.array([np.linspace(0.1, 15, 150), 1.0 / np.linspace(0.1, 15, 150)]).T

# Plot the input data
plt.plot(trainDataMatrix[:, 0], trainDataMatrix[:, 1], label='Input Data (f(x)=1/x)')
plt.legend(loc='upper left')
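# Note (sanity check, not part of the original script): the test range extends
# past the training range, so a third of the test points are pure extrapolation:
#   np.mean(np.linspace(0.1, 15, 150) > 10)  # -> ~0.333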

minmax = np.array(dataset_minmax(trainDataMatrix))
target_minmax = minmax[-1]
normalize_dataset_regression(trainDataMatrix, minmax)

# Individual training run (test set == training set) for plotting
plotnn = MultilayerNnRegressor()
test = plotnn.back_propagation(trainDataMatrix, trainDataMatrix, l_rate, n_epoch,
                               n_hidden, n_hidden2, target_minmax)

# Rescale the predictions and the actuals
actual = np.array([row[-1] for row in trainDataMatrix])
plotnn.denormalize_target(actual, target_minmax)
plotnn.denormalize_target(test, target_minmax)
denormalize_dataset_regression(trainDataMatrix, minmax)

# Plot output for test data == train data
plt.plot(trainDataMatrix[:, 0], test, label='Predicted (f(x)=1/x)')
plt.plot(trainDataMatrix[:, 0], actual, label='Output (y_hat = f(x))')
plt.title('Predicted and Training Expected (Denormalized)')
plt.xlabel('input value (x) denormalized')
plt.ylabel('output value (y) denormalized')
plt.legend(loc='upper right')
plt.show()

###### Normalize datasets for algorithm execution on extrapolated test data and plot
minmax = np.array(dataset_minmax(trainDataMatrix))
target_minmax = minmax[-1]
normalize_dataset_regression(trainDataMatrix, minmax)

test_minmax = np.array(dataset_minmax(testDataMatrix))
test_target_minmax = test_minmax[-1]
normalize_dataset_regression(testDataMatrix, test_minmax)

### Execute training and test on new data
plotnn = MultilayerNnRegressor()
test = plotnn.back_propagation(trainDataMatrix, testDataMatrix, l_rate, n_epoch,
                               n_hidden, n_hidden2, test_target_minmax)
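# Caution: testDataMatrix above is normalized with its OWN min/max, while the
# network's weights were learned under the training-set scaling. A consistent
# alternative (an assumption about intent, not a confirmed fix) would be:
#   normalize_dataset_regression(testDataMatrix, minmax)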

actual = np.array([row[-1] for row in testDataMatrix])
# Rescale the actuals and the predictions
plotnn.denormalize_target(test, test_target_minmax)
plotnn.denormalize_target(actual, test_target_minmax)
denormalize_dataset_regression(trainDataMatrix, minmax)
denormalize_dataset_regression(testDataMatrix, test_minmax)

# Plot the extrapolation
plt.plot(testDataMatrix[:, 0], test, label='Predicted (f(x)=1/x)')
plt.plot(testDataMatrix[:, 0], actual, label='Output (y_hat = f(x))')
plt.title('Predicted and Test Expected (Denormalized)')
plt.xlabel('input value (x) denormalized; NN not trained on x = 10-15')
plt.ylabel('output value (y) denormalized')
plt.legend(loc='upper right')
plt.show()
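# One hedged experiment for the poorly learned low-x region (an assumption to
# try, not part of the original code): sample x log-spaced so the steep region
# near x = 0.1 receives far more training points, e.g.
#   xs = np.geomspace(0.1, 10, 301)
#   trainDataMatrix = np.array([xs, 1.0 / xs]).T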
