
Question

import numpy as np

def loss(w, b, xTr, yTr, C):
    """
    INPUT:
    w   : d-dimensional weight vector
    b   : scalar (bias)
    xTr : n x d matrix (each row is an input vector)
    yTr : n-dimensional vector (each entry is a label)
    C   : scalar (constant that controls the tradeoff between the l2-regularizer and the hinge-loss)

    OUTPUT:
    loss : the total loss obtained with (w, b) on xTr and yTr (scalar)
    """
Tests:
# generate_data, loss_grader, loss_grader_wrong, and runtest are
# provided by the assignment environment.
xTr_test, yTr_test = generate_data()
n, d = xTr_test.shape

# Check whether your loss() returns a scalar
def loss_test1():
    w = np.random.rand(d)
    b = np.random.rand(1)
    loss_val = loss(w, b, xTr_test, yTr_test, 10)
    return np.isscalar(loss_val)

# Check whether your loss() returns a nonnegative scalar
def loss_test2():
    w = np.random.rand(d)
    b = np.random.rand(1)
    loss_val = loss(w, b, xTr_test, yTr_test, 10)
    return loss_val >= 0

# Check whether you implement the l2-regularizer correctly
def loss_test3():
    w = np.random.rand(d)
    b = np.random.rand(1)
    loss_val = loss(w, b, xTr_test, yTr_test, 0)
    loss_val_grader = loss_grader(w, b, xTr_test, yTr_test, 0)
    return np.linalg.norm(loss_val - loss_val_grader) < 1e-5

# Check whether you implemented the squared hinge loss and not the standard hinge loss.
# Note: loss_grader_wrong is the (wrong) implementation of the standard hinge loss,
# so the results should NOT match.
def loss_test4():
    w = np.random.randn(d)
    b = np.random.rand(1)
    loss_val = loss(w, b, xTr_test, yTr_test, 1)
    badloss = loss_grader_wrong(w, b, xTr_test, yTr_test, 1)
    return not (np.linalg.norm(loss_val - badloss) < 1e-5)

# Check whether you implement the squared hinge loss correctly
def loss_test5():
    w = np.random.randn(d)
    b = np.random.rand(1)
    loss_val = loss(w, b, xTr_test, yTr_test, 10)
    loss_val_grader = loss_grader(w, b, xTr_test, yTr_test, 10)
    return np.linalg.norm(loss_val - loss_val_grader) < 1e-5

# Check whether you implement the loss correctly
def loss_test6():
    w = np.random.randn(d)
    b = np.random.rand(1)
    loss_val = loss(w, b, xTr_test, yTr_test, 100)
    loss_val_grader = loss_grader(w, b, xTr_test, yTr_test, 100)
    return np.linalg.norm(loss_val - loss_val_grader) < 1e-5

runtest(loss_test1, 'loss_test1')
runtest(loss_test2, 'loss_test2')
runtest(loss_test3, 'loss_test3')
runtest(loss_test4, 'loss_test4')
runtest(loss_test5, 'loss_test5')
runtest(loss_test6, 'loss_test6')
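
For reference, here is a minimal sketch of one common implementation, assuming the objective is the unweighted l2-regularizer plus a C-weighted sum of squared hinge terms, i.e. loss(w, b) = w.w + C * sum_i max(0, 1 - y_i * (w.x_i + b))^2. The exact formula inside loss_grader is not shown, so this formulation (in particular the regularizer carrying weight 1 rather than, say, 0.5) is an assumption:

import numpy as np

def loss(w, b, xTr, yTr, C):
    # margins y_i * (w . x_i + b) for all n training points at once
    margins = yTr * (xTr @ w + b)
    # squared hinge terms: max(0, 1 - margin)^2
    hinge_sq = np.maximum(0.0, 1.0 - margins) ** 2
    # assumed objective: unweighted l2-regularizer plus C-weighted squared hinge.
    # float() collapses the result to a plain Python scalar (the tests pass b as a
    # length-1 array, and np.isscalar must return True on the output).
    return float(w @ w + C * np.sum(hinge_sq))

# Hand-checkable sanity test on hypothetical toy data (not the assignment's generate_data):
xTr_toy = np.array([[1.0, 0.0], [0.0, 1.0]])
yTr_toy = np.array([1.0, -1.0])
w_toy = np.array([1.0, 0.0])
print(loss(w_toy, 0.0, xTr_toy, yTr_toy, 10))  # margins [1, 0] -> 1 + 10 * 1 = 11.0

Squaring the hinge makes the loss differentiable everywhere (the plain hinge has a kink at margin 1), which is exactly what loss_test4 checks: the result must differ from the standard hinge loss computed by loss_grader_wrong.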

