
Question


using LinearAlgebra, Zygote, ForwardDiff, Printf
using CSV, DataFrames
using StatsBase: mean
using Parameters
using Distributions
using Random
using BSON
using ProgressMeter
using ProgressBars
### Below you will implement your linear regression object from scratch.
###
### You will also implement some penalty methods.
#### ----- ###
#### Before getting started, you should write your student_number in integer format.
const student_number::Int64 = 1000929216  ## <--- replace 0 by your student_number
### ---- ###
### This homework is a bit harder than the usual ones, so you may want to
### take a look at the official Julia documentation.
### Before you get started, have a look at Julia's do...end syntax.
### Instead of the mean squared loss we shall use the Huber loss, which
### sometimes performs better when your dataset contains some out-of-distribution points.
### We split it into two parts: the first one is for scalars, the second one is for vectors.
### Remember that you will use multiple dispatch!!!
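### (For reference, with residual r = y_pred - y_true, the piecewise loss implemented below is
###  0.5*r^2 when |r| <= δ, and δ*(|r| - 0.5*δ) otherwise.)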
function huber_loss(y_pred::T, y_true::T; δ::Float64=0.1) where T <: Real
    loss = abs(y_pred - y_true)
    if loss <= δ
        return 0.5 * loss^2
    else
        return δ * (loss - 0.5 * δ)
    end
end
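### Quick sanity check of the second branch (it matches the unit test below):
### for y_pred = 1.0, y_true = 2.0, δ = 0.9 we get |1.0 - 2.0| = 1.0 > δ,
### so the loss is 0.9 * (1.0 - 0.5*0.9) = 0.9 * 0.55 = 0.495.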
function huber_loss(y_pred::AbstractArray{T}, y_true::AbstractArray{T}) where T <: Real
    if length(y_pred) != length(y_true)
        throw(ArgumentError("Input arrays must have the same length"))
    end
    n = length(y_pred)
    total_loss = 0.0
    for i in 1:n
        total_loss += huber_loss(y_pred[i], y_true[i])
    end
    return total_loss / n
end
function unit_test_huber_loss()::Bool
    Random.seed!(0)
    @assert huber_loss(1.0, 1.0) == 0.0
    @assert huber_loss(1.0, 2.0; δ=0.9) == 0.49500000000000005
    @assert isapprox(huber_loss(randn(100, 100), randn(100, 100)), 0.10791842, atol=1e-2)
    @assert isapprox(huber_loss(randn(100), randn(100)), 0.107945, atol=1e-2)
    @info "You can not stop now comrade!!! jump to the next exercise!!!"
    return 1
end
## Let's see whether you have implemented huber_loss() well!
unit_test_huber_loss()
### Create an abstract parent type (a "roof") for the linear regression model.
abstract type LinearRegression end
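## (The abstract type lets functions such as update_grads! below dispatch on any LinearRegression subtype.)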
mutable struct linear_regression <: LinearRegression
    ## This part is given to you!!!
    ## Realize that we have fields: θ and bias.
    θ::AbstractVector
    bias::Real
    linear_regression(n::Int64) = new(0.004 * randn(n), zero(1))
end
# Define the predict function for linear regression
function predict(model::linear_regression, X::AbstractVector)
    return dot(model.θ, X) + model.bias
end
### Write the forward-pass function.
function (lr::linear_regression)(X::Matrix{T}) where T<:Real
    ## This dude is the forward pass function!!!
    m, n = size(X)
    predictions = zeros(m)
    for i in 1:m
        predictions[i] = predict(lr, X[i, :])
    end
    return predictions
end
function unit_test_forward_pass()::Bool
    try
        linear_regression(20)(randn(10, 20))
    catch ERROR
        error("SEG-FAULT!!!!")
    end
    @info "Real test started!!!!"
    for i in ProgressBar(1:10000)
        sleep(0.0001)
        lr = linear_regression(3)
        x = randn(2, 3)
        @assert lr(x) == x * lr.θ .+ lr.bias
    end
    @info "Oki doki!!!"
    return 1
end
### Let's give it a try!!!!!!
unit_test_forward_pass()
## We shall now implement the fit! method!!!
## Before we get going, run the next 5 lines to see that in this setting the grad function returns a dictionary-like structure:
#=
lr = linear_regression(10)
val, grad = Zygote.withgradient(lr) do lr
    norm(lr.θ)^2 + lr.bias^2
end
=#
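## (Side note on the do...end syntax mentioned earlier: the do block is just sugar for passing an
## anonymous function as the first argument, so the commented example above is equivalent to
##     Zygote.withgradient(lr -> norm(lr.θ)^2 + lr.bias^2, lr)
## and Zygote.withgradient returns the value together with the gradients.)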
function update_grads!(lr::LinearRegression, learning_rate::Float64, grads)
    ## Here you will implement update_grads!; this function returns nothing.
    ## Search for the setfield! and getfield functions.
    ## x -= learning_rate*grads will happen here!!!
    lr.θ .-= learning_rate * grads[1]    # Update θ
    lr.bias -= learning_rate * grads[2]  # Update bias
    return nothing
    return nothing
end
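## A minimal, hypothetical sketch of the setfield!/getfield approach the hint points at
## (it assumes `grads` holds one entry per field, in the same order as the struct's fields):
#=
for (i, f) in enumerate(fieldnames(typeof(lr)))
    setfield!(lr, f, getfield(lr, f) .- learning_rate .* grads[i])
end
=#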
function fit!(lr::linear_regression,
              X::AbstractMatrix,
              y::AbstractVector;
              learning_rate::Float64=0.00001,
              max_iter::Integer=5,
              λ::Float64=0.01)
    for iter in 1:max_iter
        # Forward pass
        y_pred = lr(X)
        # Compute Huber loss
        loss = huber_loss(y_pred, y)
        # Compute gradients using Zygote
        grads = Zygote.gradient(lr -> huber_loss(lr(X), y), lr)
        # Update gradients with regularization term
        grads_tuple = (grads.θ .+ 2 * λ * lr.θ, grads.bias)
        # Update model parameters
        lr.θ, lr.bias = grads_tuple
        # Update progress bar
        next!(progress)
    end
    return nothing
end
## Let's give it a try!!!
lr = linear_regression(20)
X = randn(100, 20)
y = randn(100)
fit!(lr, X, y; learning_rate=0.00001, max_iter=10000)
What is wrong with this code?
