Question
Is this code correct?
import tensorflow as tf
import numpy as np
def read_parse_a9a():
    labels = []
    data = []
    with open("a9a.txt") as f:  # ijcnn1.txt / a9a.txt
        for line in f:
            xs = line.split()
            h = [0.0] * 123  # a9a has 123 features
            for s in xs[1:]:
                s = s.strip()
                if len(s) == 0:
                    continue
                k, v = s.split(":")
                h[int(k) - 1] = float(v)  # LIBSVM indices are 1-based
            data.append(h)
            if xs[0] == "+1":
                labels.append(1)
            else:
                labels.append(0)
    return data, labels
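# For reference, a9a is stored in LIBSVM's sparse format ("label idx:val ...",
# 1-based indices). A quick check of the parsing logic on one made-up line
# (the indices below are hypothetical, not real a9a data):
_sample = "-1 3:1 11:1 14:1"
_xs = _sample.split()
_h = [0.0] * 123
for _s in _xs[1:]:
    _k, _v = _s.split(":")
    _h[int(_k) - 1] = float(_v)
print(_xs[0], sum(_h))  # -1 3.0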
def prediction(w, data):
    return 1 / (1 + np.exp(-np.dot(w, data.transpose())))
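# The exp above overflows for large |w . x|, which makes pred saturate to
# exactly 0 or 1. A numerically stable sigmoid (a sketch) splits on the sign
# so exp only ever sees non-positive arguments:
def stable_sigmoid(z):
    out = np.empty_like(z, dtype=float)
    pos = z >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])
    out[~pos] = ez / (1.0 + ez)
    return out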
# computes the average logloss for given prediction
def logloss(pred, labels):
    Li = labels * np.log(pred) + (1 - labels) * np.log(1 - pred)
    return -sum(Li) / len(pred)
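# np.log(pred) is -inf when pred == 0, and 0 * -inf is nan -- exactly the two
# RuntimeWarnings in the output below. A clipped variant (a sketch; eps is an
# assumed constant) keeps the loss finite:
def logloss_clipped(pred, labels, eps=1e-12):
    pred = np.clip(pred, eps, 1 - eps)
    Li = labels * np.log(pred) + (1 - labels) * np.log(1 - pred)
    return -np.mean(Li)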
def gradient(n, w, data, labels):
    return 1 / n * np.dot(data.transpose(), prediction(w, data) - labels)
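# A central-difference check (a sketch) to compare against gradient(). Note
# that gradient() divides by the feature count n while the mean log-loss
# averages over the m samples, so expect a constant factor of m/n between them:
def numeric_grad(f, w, h=1e-6):
    g = np.zeros_like(w)
    for j in range(len(w)):
        e = np.zeros_like(w)
        e[j] = h
        g[j] = (f(w + e) - f(w - e)) / (2 * h)
    return g
# Usage: numeric_grad(lambda v: logloss(prediction(v, features[:50]), labels[:50]), w)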
n = 123  # number of features
features, labels = read_parse_a9a()
features = np.array(features)
labels = np.array(labels)
B = np.eye(n)  # use either identity or compute Hessian in first step for BFGS
w = np.zeros(n)  # initialize weight vector
predictions = prediction(w, features)
loss = logloss(predictions, labels)
grad = gradient(n, w, features, labels)
print("Initial gradient shape:", grad.shape)
Initial gradient shape: (123,)
def wolfe(w, p, data, labels, gradf=gradient, lossf=logloss, predf=prediction, alpha=1.0, c1=1e-4, c2=0.9):
    gradw = gradf(len(w), w, data, labels)
    for i in range(100):
        if lossf(predf(w + alpha * p, data), labels) > logloss(predf(w, data), labels) + c1 * alpha * np.dot(gradw, p):
            alpha = alpha / 2
        elif np.dot(gradf(len(w), w + alpha * p, data, labels), p) < c2 * np.dot(gradw, p):
            alpha = alpha * 2
        elif i == 99:
            raise Exception("wolfe doesn't finish")
        else:
            break
    return alpha
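# Note the elif ordering above: alpha is adjusted whenever either Wolfe test
# fails, so the exception can only trigger on the last iteration when both
# conditions already hold, and a failed search silently returns a bad alpha.
# A plain Armijo backtracking search (a sketch; the constants are conventional
# choices, not from the original) avoids both problems:
def backtracking(w, p, data, labels, lossf=logloss, predf=prediction,
                 gradf=gradient, alpha=1.0, c1=1e-4, shrink=0.5, max_halvings=50):
    f0 = lossf(predf(w, data), labels)
    g0 = np.dot(gradf(len(w), w, data, labels), p)
    for _ in range(max_halvings):
        if lossf(predf(w + alpha * p, data), labels) <= f0 + c1 * alpha * g0:
            break
        alpha *= shrink
    return alpha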
def dfp(w0, B, data, labels, predf=prediction, gradf=gradient, lossf=logloss, max_iter=100, tol=1e-5):
    w = w0
    Binv = np.linalg.inv(B)  # We only compute the inverse at initialization
    gradw = gradf(len(w), w, data, labels)
    for i in range(max_iter):
        # Step 1:
        p = -np.dot(Binv, gradw)  # Use np.dot instead of np.outer
        # Step 2:
        alpha = wolfe(w, p, data, labels)
        # Step 3:
        s = alpha * p
        wnew = w + s
        gradnew = gradf(len(w), wnew, data, labels)
        # Step 4:
        if np.linalg.norm(gradnew - gradw) < tol:
            break
        y = gradnew - gradw
        gradw = gradnew
        sy = np.dot(s, y)
        # Step 5 (DFP update):
        Bs = np.dot(Binv, s)
        Binv = Binv + np.outer(s, s) / np.dot(s, y) - np.outer(Bs, Bs) / np.dot(s, Bs)
        # Step 6 (print):
        predictions = predf(w, data)
        loss = lossf(predictions, labels)
        print("iter:", i, "loss:", loss)
        w = wnew
    return w
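# For comparison, the textbook DFP update of the inverse Hessian (see e.g.
# Nocedal & Wright) puts y, not s, in the curvature term:
#   H+ = H + (s s^T)/(s^T y) - (H y)(H y)^T / (y^T H y)
def dfp_inverse_update(Hinv, s, y):
    Hy = np.dot(Hinv, y)
    return Hinv + np.outer(s, s) / np.dot(s, y) - np.outer(Hy, Hy) / np.dot(y, Hy)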
n = 123
print("DFP")
dfp(np.zeros(n), np.eye(n), features, labels)
DFP
iter: 0 loss: ...
iter: 1 loss: ...
iter: 2 loss: nan
iter: 3 loss: ...
iter: 4 loss: nan
iter: 5 loss: ...
C:\Users\Mirka\AppData\Local\Temp\ipykernel_...\...py:...: RuntimeWarning: divide by zero encountered in log
  Li = labels * np.log(pred) + (1 - labels) * np.log(1 - pred)
C:\Users\Mirka\AppData\Local\Temp\ipykernel_...\...py:...: RuntimeWarning: invalid value encountered in multiply
  Li = labels * np.log(pred) + (1 - labels) * np.log(1 - pred)
... (remaining iterations: mostly nan, occasionally inf or a finite loss)
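The two RuntimeWarnings in this log can be reproduced in isolation once a prediction saturates to exactly 0 or 1, consistent with the unclipped log-loss above:

import numpy as np

pred = np.array([0.0, 1.0])  # predictions saturated to exactly 0 and 1
labels = np.array([0, 1])
Li = labels * np.log(pred) + (1 - labels) * np.log(1 - pred)
print(Li)  # [nan nan]: log(0) -> -inf, then 0 * -inf -> nan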