
Question


Hi, I have a bug in this neural network program. Can someone help me get it working? Thanks in advance.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt1

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Creating the feed-forward neural network
# 1 input layer (1, 30)
# 1 hidden layer (1, 5)
# 1 output layer (3, 3)
def f_forward(x, w1, w2):
    # hidden layer
    z1 = x.dot(w1)    # input from layer 1
    a1 = sigmoid(z1)  # output of layer 2
    # output layer
    z2 = a1.dot(w2)   # input of the output layer
    a2 = sigmoid(z2)  # output of the output layer
    return a2

# initializing the weights randomly
def generate_wt(x, y):
    l = []
    for i in range(x * y):
        l.append(np.random.randn())
    return np.array(l).reshape(x, y)

# for loss we will be using mean square error (MSE)
def loss(out, Y):
    s = np.square(out - Y)
    s = np.sum(s) / len(y)
    return s

# back propagation of error
def back_prop(x, y, w1, w2, alpha):
    # hidden layer
    z1 = x.dot(w1)    # input from layer 1
    a1 = sigmoid(z1)  # output of layer 2
    # output layer
    z2 = a1.dot(w2)   # input of the output layer
    a2 = sigmoid(z2)  # output of the output layer
    # error in the output layer
    d2 = a2 - y
    d1 = np.multiply((w2.dot(d2.transpose())).transpose(),
                     np.multiply(a1, 1 - a1))
    # gradients for w1 and w2
    w1_adj = x.transpose().dot(d1)
    w2_adj = a1.transpose().dot(d2)
    # updating parameters
    w1 = w1 - alpha * w1_adj
    w2 = w2 - alpha * w2_adj
    return w1, w2

def train(x, Y, w1, w2, alpha=0.01, epoch=10):
    acc = []
    losss = []
    for j in range(epoch):
        l = []
        for i in range(len(x)):
            out = f_forward(x[i], w1, w2)
            l.append(loss(out, Y[i]))
            w1, w2 = back_prop(x[i], y[i], w1, w2, alpha)
        print("epochs:", j + 1, "======== acc:", (1 - (sum(l) / len(x))) * 100)
        acc.append((1 - (sum(l) / len(x))) * 100)
        losss.append(sum(l) / len(x))
    return acc, losss, w1, w2
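A side note on the block above, separate from the crash itself: loss() divides by len(y) and the training loop calls back_prop(x[i], y[i], ...), so both reach for the module-level list y rather than the Y parameter that was passed in; this only works because that global happens to exist. Below is a minimal sketch of the same two functions using the parameter consistently. It reuses np, f_forward and back_prop from above, and it divides by Y.size (the number of target elements) so the MSE stays in [0, 1], which is what the (1 - loss) * 100 accuracy readout assumes.

# mean square error averaged over the elements of the target vector
def loss(out, Y):
    return np.sum(np.square(out - Y)) / Y.size

def train(x, Y, w1, w2, alpha=0.01, epoch=10):
    acc = []
    losss = []
    for j in range(epoch):
        l = []
        for i in range(len(x)):
            out = f_forward(x[i], w1, w2)
            l.append(loss(out, Y[i]))
            # use the Y passed into train(), not the global y
            w1, w2 = back_prop(x[i], Y[i], w1, w2, alpha)
        acc.append((1 - (sum(l) / len(x))) * 100)
        losss.append(sum(l) / len(x))
        print("epochs:", j + 1, "======== acc:", acc[-1])
    return acc, losss, w1, w2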

def predict(x, w1, w2):
    Out = f_forward(x, w1, w2)
    print("sigmoid predictx:", Out)

# Creating the input data set
# A
a = [0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1]
# B
b = [0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0]
# C
c = [0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0]

# outputs
# A
ao = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
# B
bo = [1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1]
# C
co = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
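If it helps to see what these 30-element lists encode, each one reads as a small pixel grid (the A pattern reads naturally as 5 rows of 6 pixels, which is an assumption on my part, not something stated in the post). A quick visual check using the plt alias already imported:

# show the "A" input pattern as a pixel grid, assuming a row-major 5 x 6 layout
plt.imshow(np.array(a).reshape(5, 6), cmap="gray_r")
plt.title("input pattern a")
plt.show()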

""" Convert the matrix of 0 and 1 into one hot vector so that we can directly feed it to the neural network, these vectors are then stored in a list x. """ x =[np.array(a).reshape(1, 30), np.array(b).reshape(1, 30), np.array(c).reshape(1, 30)]

y = [np.array(ao).reshape(1, 30),
     np.array(bo).reshape(1, 30),
     np.array(co).reshape(1, 30)]

w1 = generate_wt(30, 5)
w2 = generate_wt(30, 5)
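If the error is a shape mismatch inside f_forward (something like "shapes (1,5) and (30,5) not aligned" from a1.dot(w2)), the second line above is the likely culprit. The hidden activation a1 has shape (1, 5) and each target y[i] has shape (1, 30), so w2 has to map 5 hidden units to 30 outputs. A sketch of the corrected weight setup, assuming the 30-element patterns above really are the intended outputs:

w1 = generate_wt(30, 5)   # input (1, 30) -> hidden (1, 5)
w2 = generate_wt(5, 30)   # hidden (1, 5) -> output (1, 30), matching the targets

With those shapes the back_prop expressions also line up: w2.dot(d2.transpose()) is (5, 30) dot (30, 1) = (5, 1), which transposes to (1, 5) and matches a1, while w1_adj comes out as (30, 5) and w2_adj as (5, 30).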

"""The arguments of train function are data set list x, correct labels y, weights w1, w2, learning rate = 0.1, no of epochs or iteration.The function will return the matrix of accuracy and loss and also the matrix of trained weights w1, w2""" print("Call Train") acc, losss, w1, w2 = train(x, y, w1, w2, 0.1, 100) print("acc:", acc) print("w1:", w1) print("w2:", w2) print("losss:", losss)

# plotting accuracy
plt1.plot(acc)
plt1.ylabel('Accuracy')
plt1.xlabel("Epochs:")
plt1.show()

# plotting loss
plt1.plot(losss)
plt1.ylabel('Loss')
plt1.xlabel("Epochs:")
plt1.show()
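Once the script runs end to end (with the w2 shape fixed as sketched above), a quick sanity check is to push each training sample back through the trained network and compare the rounded sigmoid outputs with the target patterns; after 100 epochs at a learning rate of 0.1 they are usually close, though that is not guaranteed. The letter labels below are only for readability:

# sanity check on the training samples themselves
for name, sample, target in zip(["A", "B", "C"], x, y):
    out = f_forward(sample, w1, w2)   # (1, 30) sigmoid activations
    print(name, "matches target:", np.array_equal(np.round(out), target))
    predict(sample, w1, w2)           # raw outputs, via the predict() defined above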
