
Question

1 Approved Answer


def generate_values():
    import numpy as np
    from sklearn.datasets import make_classification

    ##########################################
    # Generate Naive Bayes Data
    ##########################################
    np.random.seed(6)
    X1 = np.random.choice([0, 1], size=(100_000, 10), p=[0.8, 0.2])
    betas = np.random.uniform(0, 1, size=10)
    z = X1 @ betas - 1
    p = 1 / (1 + np.exp(-z))
    # The transcription cut off after "np.where(p"; thresholding the
    # probabilities at 0.5 is a plausible reconstruction of the label rule.
    y1 = np.where(p > 0.5, 1, 0)

    Nspam = 462 + 500
    Nham = 10000 - Nspam
    s0 = np.argwhere(y1 == 0)[:Nham].flatten()
    s1 = np.argwhere(y1 == 1)[:Nspam].flatten()
    sel = np.hstack([s0, s1])
    sel = np.random.choice(sel, size=10_000, replace=False)
    X1 = X1[sel, :]
    y1 = y1[sel]

    ##########################################
    # Generate Logistic Regression Data
    ##########################################
    X2, y2 = make_classification(n_samples=1000, n_features=4, n_informative=4,
                                 n_redundant=0, n_classes=2, random_state=7)

    return X1, y1, X2.round(3), y2

def unit_test_1(add_ones):
    import numpy as np
    global X2
    try:
        XE = add_ones(X2)
    except:
        print('Function results in an error.')
        return
    if not isinstance(XE, np.ndarray):
        print(f'Returned object is not an array. It has type {type(XE)}.')
        return
    if XE.shape != (1000, 5):
        print('Shape of returned array is not correct.')
        return
    if (XE[:, 0] == 1).sum() != 1000:
        print('First column does not consist entirely of ones.')
        return
    if np.sum(XE[:, 1:] == X2) != 4000:
        print('Returned array does not contain copy of feature array.')
        return
    print('All tests passed.')

def unit_test_2(predict_proba):
    import numpy as np
    global X2
    betas1 = [2, -1, -3, 2, 3]
    betas2 = [5, 1, -4, 3, 5]
    try:
        p1 = predict_proba(X2, betas1)
        p2 = predict_proba(X2, betas2)
    except:
        print('Function results in an error.')
        return
    if not isinstance(p1, np.ndarray):
        print(f'Returned object is not an array. It has type {type(p1)}.')
        return
    if p1.shape != (1000,):
        print('Shape of returned array is not correct.')
        return
    check1 = list(p1[:5].round(4)) == [0.2505, 0.996, 0.027, 0.0046, 0.1021]
    check2 = list(p2[:5].round(4)) == [0.0166, 0.9998, 0.9388, 0.0009, 0.662]
    if not (check1 and check2):
        print('Probability estimates are incorrect.')
        return
    print('All tests passed.')

def unit_test_3(calculate_NLL):
    import numpy as np
    global X2, y2
    betas1 = [2, -1, -3, 2, 3]
    betas2 = [5, 1, -4, 3, 5]
    try:
        nll1 = calculate_NLL(X2, y2, betas1)
        nll2 = calculate_NLL(X2, y2, betas2)
    except:
        print('Function results in an error.')
        return
    try:
        nll1 = float(nll1)
        nll2 = float(nll2)
    except:
        print('Returned value has non-numerical data type.')
        return
    check1 = round(nll1, 2) == 450.23
    check2 = round(nll2, 2) == 118.16
    if not (check1 and check2):
        print('NLL score returned is incorrect.')
        return
    print('All tests passed.')

X1, y1, X2, y2 = generate_values()

-------

(The second generate_values() below comes from a different notebook cell. Note that it returns seven values and, once run, shadows the four-value definition above; this is the source of the ValueError in the traceback further down.)

def generate_values():
    import numpy as np
    from sklearn.datasets import make_classification
    N = 1000
    np.random.seed(1)
    x1 = np.random.normal(500, 100, N).astype(int)
    np.random.seed(597474)
    ep = np.random.normal(0, 0.75, N)
    y1 = 16 * (x1 / 1000) ** 5.8 * np.exp(ep)
    y1 = y1.round(4)
    X2, y2 = make_classification(n_samples=1000, n_classes=3, n_features=100, n_informative=20,
                                 n_redundant=10, class_sep=1.6, random_state=16)
    np.random.seed(1)
    X3 = np.random.normal(100, 20, size=(100, 5))
    betas = np.array([15, 32, 4.5, -27, -17.5])
    y3 = X3 @ betas + np.random.normal(0, 400, 100)
    X3_ones = np.hstack([np.ones((100, 1)), X3])
    return x1, y1, X2, y2, X3, y3, X3_ones

x1, y1, X2, y2, X3, y3, X3_ones = generate_values()

------ code with error (corrected below) ------

import numpy as np

def add_ones(X):
    """
    Adds a column of ones to the feature matrix.
    """
    ones = np.ones((X.shape[0], 1))
    return np.hstack((ones, X))
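As a quick sanity check, add_ones() on a small hypothetical input (not part of the original tests) behaves like this:

X_demo = np.array([[2.0, 3.0], [4.0, 5.0]])  # hypothetical two-row input
print(add_ones(X_demo))
# [[1. 2. 3.]
#  [1. 4. 5.]]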

def predict_proba(X, betas):
    """
    Calculates the probability of belonging to class 1 for each data point.
    """
    # Fix: unit_test_2 passes the raw feature matrix together with an extra
    # intercept coefficient, so the ones column must be added here.
    z = add_ones(X) @ betas
    p = 1 / (1 + np.exp(-z))
    return p
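In the notation of the code, predict_proba() computes the logistic (sigmoid) transform of the linear predictor:

$$p_i = \frac{1}{1 + e^{-z_i}}, \qquad z_i = \mathbf{x}_i^{\top} \boldsymbol{\beta},$$

where $\mathbf{x}_i$ is the $i$-th row of the extended (intercept-augmented) feature matrix.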

def calculate_NLL(X, y, betas):
    """
    Calculates the negative log-likelihood (NLL) score.
    """
    # predict_proba() adds the intercept column itself, so X is passed raw,
    # matching the call signature used by unit_test_3.
    p = predict_proba(X, betas)
    NLL = -np.sum(y * np.log(p) + (1 - y) * np.log(1 - p))
    return NLL
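calculate_NLL() implements the standard binary cross-entropy objective, summed over all $n$ observations:

$$\mathrm{NLL}(\boldsymbol{\beta}) = -\sum_{i=1}^{n} \bigl[\, y_i \log p_i + (1 - y_i) \log(1 - p_i) \,\bigr]$$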

def train_logreg(x, y, alpha, steps):
    """
    Trains a logistic regression model using gradient descent.
    """
    XE = add_ones(x)
    betas = np.zeros(XE.shape[1])
    for i in range(steps):
        # predict_proba() and calculate_NLL() expect the raw feature matrix.
        p = predict_proba(x, betas)
        NLL = calculate_NLL(x, y, betas)
        gradient = XE.T @ (p - y)
        betas = betas - alpha * gradient
        if (i + 1) % 10 == 0:
            print(f'Iteration {i+1:003} - betas={betas.round(3)}, NLL={NLL:.2f}')
    return betas
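The gradient line in train_logreg() is the derivative of the NLL above with respect to $\boldsymbol{\beta}$; for the sigmoid model it collapses to the compact matrix form

$$\nabla_{\boldsymbol{\beta}} \mathrm{NLL} = X_E^{\top}(\mathbf{p} - \mathbf{y}),$$

which gives the gradient descent update rule $\boldsymbol{\beta} \leftarrow \boldsymbol{\beta} - \alpha\, X_E^{\top}(\mathbf{p} - \mathbf{y})$.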

# Generate the data.
# Fix: the original four-value unpack raised
# "ValueError: too many values to unpack (expected 4)" because the
# seven-value generate_values() above had shadowed the four-value version.
# The four-value definition must be the one in scope here.
X1, y1, X2, y2 = generate_values()

# Train the model with different learning rates
learning_rates = [0.01, 0.001, 0.0001]
for learning_rate in learning_rates:
    print(f'Training with learning rate: {learning_rate}')
    betas = train_logreg(X2, y2, alpha=learning_rate, steps=3000)
    # Check if the NLL score is close to 74.27
    NLL = calculate_NLL(X2, y2, betas)
    if round(NLL, 2) == 74.27:
        print(f'Model trained with learning rate {learning_rate} achieved NLL score of 74.27.')
        break
    else:
        print(f'Model trained with learning rate {learning_rate} did not achieve desired NLL score.')

Initial question (transcribed from the attached notebook images):

The traceback reported when running the code above:

ValueError                                Traceback (most recent call last)
<ipython-input-47-1a56953f643f> in <module>()
     79 # Generate the data
     80
---> 81 X1, y1, X2, y2 = generate_values()
     82

ValueError: too many values to unpack (expected 4)

In this problem, you will train a logistic regression model on X2 and y2.

Part 7.A

Use the cell below to define a function train_logreg(). The function should accept four parameters named x, y, alpha, and steps. Descriptions of the parameters are as follows:

- x is expected to be a 2D feature array.
- y is expected to be a 1D label array.
- alpha is expected to be a learning rate.
- steps is the number of iterations of gradient descent to perform during training.

The function should apply gradient descent to determine optimal parameters for a logistic regression model. An outline of the steps that the function should perform is provided below.

1. The function add_ones() should be used to create an "extended" feature matrix XE.
2. A coefficient array named betas should be created with all values initialized to zero. The following code can be used for this task: betas = np.zeros(XE.shape[1])
3. A loop should be executed for the number of iterations indicated by steps. Each iteration of the loop should perform the tasks below.
   a. Use predict_proba() to generate probability estimates.
   b. Use calculate_NLL() to calculate the NLL score for the current iteration of the model. Store this value in a variable named NLL.
   c. Calculate the gradient. The lesson titled "Training a Logistic Regression Model" explains how to do this.
   d. Use the gradient descent update rule to update the parameter estimates.
   e. Print an update message every 10 iterations. (Code for this has been provided.)
4. When the loop is finished, return betas.

[ ] def train_logreg(x, y, alpha, steps):
        # Add code here
        for i in range(steps):
            # Add code here
            if (i + 1) % 10 == 0:
                print(f'Iteration {i+1:003} - betas={betas.round(3)}, NLL={NLL:.2f}')
        return betas

Part 7.B

Apply your train_logreg() function to the training data stored in X2 and y2. Use steps=300 and experiment with the learning rate. Your final NLL score should be 74.27. Store the optimal parameters returned by the function in a variable. [ ]
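For Part 7.B, here is a minimal sketch of the experimentation loop, assuming the corrected helper functions above are in scope and that the four-value generate_values() supplied X2 and y2. The candidate learning rates are illustrative, not prescribed by the assignment; the target NLL of 74.27 comes from the assignment text.

# Hypothetical learning-rate sweep for Part 7.B.
for alpha in [0.1, 0.01, 0.001]:  # illustrative candidates
    betas_opt = train_logreg(X2, y2, alpha=alpha, steps=300)
    final_nll = calculate_NLL(X2, y2, betas_opt)
    print(f'alpha={alpha}: final NLL = {final_nll:.2f}')
    if round(final_nll, 2) == 74.27:
        break  # betas_opt now holds the optimal parameters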

