Question
I want you to add output of the F1 score, precision, ROC AUC, and Cohen's kappa so it fits into the code below. Please don't give me a different version of the code; just add what I requested at the end of my code.
Thank you in advance.
import os
import numpy as np
import pandas as pd
import keras
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import load_img
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import random
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, MaxPool2D , Dropout, Flatten, Dense, Activation, BatchNormalization
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.utils import class_weight
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, cohen_kappa_score, roc_auc_score
print(os.listdir(r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages"))
# Define Constants
FAST_RUN = False
IMAGE_WIDTH = 256  # 150 is also acceptable
IMAGE_HEIGHT = 256
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3  # may not be needed
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print(physical_devices)
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Prepare Training Data
filenames = os.listdir(r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages")
categories = []
for filename in filenames:
    category = filename.split('l')[0]
    if category == 'image_benign_':
        categories.append(0)
    else:
        categories.append(1)
df = pd.DataFrame({
    'filename': filenames,
    'category': categories
})
print(df.head())
print(df.tail())
# in Colab this will display inline
df['category'].value_counts().plot.bar()
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax')) # 2 because we have benign and malware classes
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Callbacks
## Early stopping: to prevent overfitting, stop training if val_loss has not decreased for 10 epochs
earlystop = EarlyStopping(patience=10)
# Learning Rate Reduction
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience=2,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
callbacks = [earlystop, learning_rate_reduction]
#Prepare data
df["category"] = df["category"].replace({0: 'benign', 1: 'malware'})
train_df, validate_df = train_test_split(df, test_size=0.20, random_state=42)
train_df = train_df.reset_index(drop=True)
validate_df = validate_df.reset_index(drop=True)
total_train = train_df.shape[0]
total_validate = validate_df.shape[0]
batch_size = 15  # default 15
# Training Generator
# Default augmentation settings
train_datagen = ImageDataGenerator(
    rotation_range=15,
    rescale=1./255,
    shear_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    width_shift_range=0.1,
    height_shift_range=0.1
)
###############################################
# Augmenting the training data
train_generator = train_datagen.flow_from_dataframe(
    train_df,
    r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages",
    x_col='filename',
    y_col='category',
    target_size=IMAGE_SIZE,
    class_mode='categorical',
    batch_size=batch_size
)
# Validation Generator
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_dataframe(
    validate_df,
    r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages",
    x_col='filename',
    y_col='category',
    target_size=IMAGE_SIZE,
    class_mode='categorical',
    batch_size=batch_size
)
# See how our generator works
example_df = train_df.sample(n=1).reset_index(drop=True)
example_generator = train_datagen.flow_from_dataframe(
    example_df,
    r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages",
    x_col='filename',
    y_col='category',
    target_size=IMAGE_SIZE,
    class_mode='categorical'
)
# in Colab this will display inline
plt.figure(figsize=(12, 12))
for i in range(0, 15):
    plt.subplot(5, 3, i+1)
    for X_batch, Y_batch in example_generator:
        image = X_batch[0]
        plt.imshow(image)
        break
plt.tight_layout()
plt.show()
# Train the model
epochs = 1 # if FAST_RUN else 50
history = model.fit(
    train_generator,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=total_validate//batch_size,
    steps_per_epoch=total_train//batch_size,
    # class_weight=class_weights, # add
    # shuffle=True,
    callbacks=callbacks
)
model.save_weights("model.h5")
# Visualize Training
## in Colab
# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
# ax1.plot(history.history['loss'], color='b', label="Training loss")
# ax1.plot(history.history['val_loss'], color='r', label="validation loss")
# ax1.set_xticks(np.arange(1, epochs, 1))
# ax1.set_yticks(np.arange(0, 1, 0.1))
# ax2.plot(history.history['accuracy'], color='b', label="Training accuracy")
# ax2.plot(history.history['val_accuracy'], color='r', label="Validation accuracy")
# ax2.set_xticks(np.arange(1, epochs, 1))
# legend = plt.legend(loc='best', shadow=True)
# plt.tight_layout()
# plt.show()
# Prepare Testing Data
test_filenames = os.listdir(r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages")
test_df = pd.DataFrame({
    'filename': test_filenames
})
nb_samples = test_df.shape[0]
# Create Testing Generator
test_gen = ImageDataGenerator(rescale=1./255)
test_generator = test_gen.flow_from_dataframe(
    test_df,
    r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages",
    x_col='filename',
    y_col=None,
    class_mode=None,
    target_size=IMAGE_SIZE,
    batch_size=batch_size,
    shuffle=False
)
# Predict
predict = model.predict(test_generator, steps=int(np.ceil(nb_samples/batch_size)))
test_df['category'] = np.argmax(predict, axis=-1)
label_map = dict((v,k) for k,v in train_generator.class_indices.items())
test_df['category'] = test_df['category'].replace(label_map)
test_df['category'] = test_df['category'].replace({ 0: 'benign', 1: 'malware' })
# Visualize Results
# Compute the average training and validation accuracy
avg_train_acc = np.mean(history.history['accuracy'])
avg_val_acc = np.mean(history.history['val_accuracy'])
# Print the average training and validation accuracy
print("Average training accuracy:", avg_train_acc)
print("Average validation accuracy:", avg_val_acc)
# in Colab
test_df['category'].value_counts().plot.bar()
# See predicted results with images
# in Colab
sample_test = test_df.head(18)
sample_test.head()
plt.figure(figsize=(12, 24))
for index, row in sample_test.iterrows():
    filename = row['filename']
    category = row['category']
    img = load_img("D:\\RansomSecondApproach\\Ransomware_Detection_using _CNN\\MixImages\\" + filename, target_size=IMAGE_SIZE)
    plt.subplot(6, 3, index+1)
    plt.imshow(img)
    plt.xlabel(filename + '(' + "{}".format(category) + ')')
plt.tight_layout()
plt.show()
# Submission
submission_df = test_df.copy()
submission_df['id'] = submission_df['filename'].str.split('.').str[0]
submission_df['label'] = submission_df['category']
submission_df.drop(['filename', 'category'], axis=1, inplace=True)
submission_df.to_csv('submission.csv', index=False)
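
To add the requested output, one approach (a minimal sketch that reuses the objects defined above) is appended below. It evaluates the trained model on the validation split, where the true labels are known, using a non-shuffled generator so the predictions stay aligned with the labels; it assumes the generator maps benign to class index 0 and malware to class index 1 (the alphabetical default).

# Compute F1, precision, ROC AUC, and Cohen's kappa on the validation split.
# A non-shuffled generator is used so predictions stay aligned with the true labels.
eval_generator = validation_datagen.flow_from_dataframe(
    validate_df,
    r"D:\RansomSecondApproach\Ransomware_Detection_using _CNN\MixImages",
    x_col='filename',
    y_col='category',
    target_size=IMAGE_SIZE,
    class_mode='categorical',
    batch_size=batch_size,
    shuffle=False
)
val_probs = model.predict(eval_generator, steps=int(np.ceil(total_validate / batch_size)))
val_pred = np.argmax(val_probs, axis=-1)   # predicted class indices
val_true = eval_generator.classes          # true class indices (assumed: 0 = benign, 1 = malware)

print("F1 score:", f1_score(val_true, val_pred))
print("Precision:", precision_score(val_true, val_pred))
print("ROC AUC:", roc_auc_score(val_true, val_probs[:, 1]))  # probability of the positive (malware) class
print("Cohen's kappa:", cohen_kappa_score(val_true, val_pred))

The same sklearn calls can be reused on the test images once true labels for test_df are derived (for example, from the filenames).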