Question
Machine Learning (Python): How do I get rid of the lines of code in the RidgeRegression class that perform full-batch gradient descent and replace them with the standalone functions sgd (stochastic gradient descent) and predict? I want the RidgeRegression class to use sgd and predict.
Code:
import numpy as np

class RidgeRegression:
    def __init__(self, learning_rate, iterations, l2_penality):
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.l2_penality = l2_penality

    # Function for model training
    def fit(self, X, Y):
        # no_of_training_examples, no_of_features
        self.m, self.n = X.shape
        # weight initialization
        self.W = np.zeros(self.n)
        self.b = 0
        self.X = X
        self.Y = Y
        # gradient descent learning
        for i in range(self.iterations):
            self.update_weights()
        return self

    # Helper function to update weights in gradient descent
    def update_weights(self):
        Y_pred = self.predict(self.X)
        # calculate gradients
        dW = (-(2 * (self.X.T).dot(self.Y - Y_pred))
              + (2 * self.l2_penality * self.W)) / self.m
        db = -2 * np.sum(self.Y - Y_pred) / self.m
        # update weights
        self.W = self.W - self.learning_rate * dW
        self.b = self.b - self.learning_rate * db
        return self

    # Hypothesis function h(x)
    def predict(self, X):
        return X.dot(self.W) + self.b
def sgd(train_data, learning_rate, n_iter, k, divideby):
    # train_data is a pandas DataFrame whose target column is
    # 'Electrical Energy Output'; k is the mini-batch size.
    # Initially we keep w and b at 0, sized to the training data
    w = np.zeros(shape=(1, train_data.shape[1] - 1))
    b = 0
    cur_iter = 1
    while cur_iter <= n_iter:
        # Draw a small training sample of size k
        temp = train_data.sample(k)
        # Create x and y from the sampled rows
        y = np.array(temp['Electrical Energy Output'])
        x = np.array(temp.drop('Electrical Energy Output', axis=1))
        # Keep the initial gradients at 0
        w_gradient = np.zeros(shape=(1, train_data.shape[1] - 1))
        b_gradient = 0
        for i in range(k):
            # Calculate gradients for each point in the k-sized sample
            prediction = np.dot(w, x[i]) + b
            w_gradient = w_gradient + (-2) * x[i] * (y[i] - prediction)
            b_gradient = b_gradient + (-2) * (y[i] - prediction)
        # Update the weights (w) and bias (b) with the averaged gradients
        w = w - learning_rate * (w_gradient / k)
        b = b - learning_rate * (b_gradient / k)
        # Increment the iteration counter
        cur_iter = cur_iter + 1
        # Decay the learning rate by the specified factor
        learning_rate = learning_rate / divideby
    # Return the weights and bias
    return w, b

def predict(x, w, b):
    y_pred = []
    for i in range(len(x)):
        # np.asscalar is removed in NumPy >= 1.23; .item() is the replacement
        y = (np.dot(w, x[i]) + b).item()
        y_pred.append(y)
    return np.array(y_pred)
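Answer

One way to combine the pieces is sketched below, under a few assumptions: the class keeps the constructor and predict from RidgeRegression above, but fit now does the sgd-style update itself, sampling a mini-batch of k rows per iteration instead of computing gradients over all the data. Note that the standalone sgd function above drops the 2 * l2_penality * W term that update_weights applies, so the sketch adds it back to keep this a ridge (not plain least-squares) update. X and Y are assumed to be NumPy arrays, as in fit above; batch_size and the fixed rng seed are illustrative choices, not part of the original code.

import numpy as np

class RidgeRegressionSGD:
    def __init__(self, learning_rate, iterations, l2_penality, batch_size=32):
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.l2_penality = l2_penality
        self.batch_size = batch_size

    def fit(self, X, Y):
        # no_of_training_examples, no_of_features
        self.m, self.n = X.shape
        self.W = np.zeros(self.n)
        self.b = 0.0
        k = min(self.batch_size, self.m)
        rng = np.random.default_rng(0)
        for _ in range(self.iterations):
            # sample a mini-batch of k rows (the stochastic step) instead
            # of using all m rows as update_weights did
            idx = rng.choice(self.m, size=k, replace=False)
            Xb, Yb = X[idx], Y[idx]
            Y_pred = self.predict(Xb)
            # same ridge gradients as update_weights, on the mini-batch
            dW = (-2 * Xb.T.dot(Yb - Y_pred)
                  + 2 * self.l2_penality * self.W) / k
            db = -2 * np.sum(Yb - Y_pred) / k
            self.W = self.W - self.learning_rate * dW
            self.b = self.b - self.learning_rate * db
        return self

    # Hypothesis function h(x), unchanged from the original class
    def predict(self, X):
        return X.dot(self.W) + self.b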
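Usage is the same as for the original class; X_train, Y_train, and X_test here are placeholder names:

model = RidgeRegressionSGD(learning_rate=0.01, iterations=1000, l2_penality=1.0)
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)

If you also want the learning-rate decay from the standalone sgd, divide self.learning_rate by a divideby factor at the end of each loop iteration in fit.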