[Python] Keras multi-class classification with the Iris dataset

Environment

Introduction

- (Caution) The emphasis here is not on multi-class classification itself, but on how to use tf.keras.
- Build a Multilayer Perceptron (MLP) with TensorFlow Keras.
- Cover the basics of tf.keras: building a model, saving it, and loading the saved model.
- While working on Kaggle I needed to look up how to write Keras code again, so I summarized it here.

Dataset

- Verified using the Iris plants dataset bundled with scikit-learn.
- The Iris dataset contains three classes (setosa, versicolor, virginica), so it can be used for multi-class classification.
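
For reference, a minimal sketch (not part of the original listing) for confirming the three classes and their balance before training:

import numpy as np
from sklearn.datasets import load_iris

iris = load_iris()
print(iris.target_names)         # ['setosa' 'versicolor' 'virginica']
print(np.bincount(iris.target))  # [50 50 50] -> 50 samples per class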

Whole source code

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
import matplotlib.pyplot as plt

#Read iris dataset
iris = load_iris()


#Split into training data and test data
data_X = iris.data
data_y = to_categorical(iris.target) # one-hot encoding

train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, test_size=0.3, random_state=0)


#Model building
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(4),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])
#Compiling the model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#Train the model
result = model.fit(train_X, train_y, batch_size=32, epochs=50, validation_data=(test_X, test_y), verbose=1)


#Accuracy plot
plt.figure()
plt.title('Accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.plot(result.history['accuracy'], label='train')
plt.plot(result.history['val_accuracy'], label='test')
plt.legend()

#Loss plot
plt.figure()
plt.title('categorical_crossentropy Loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.plot(result.history['loss'], label='train')
plt.plot(result.history['val_loss'], label='test')
plt.legend()
plt.show()



#Use model.evaluate to get the loss and accuracy of the trained model
train_score = model.evaluate(train_X, train_y)
test_score = model.evaluate(test_X, test_y)
print('Train loss:', train_score[0])
print('Train accuracy:', train_score[1])
print('Test loss:', test_score[0])
print('Test accuracy:', test_score[1])



#Check the predictions (model.predict)
pred_train = model.predict(train_X)
pred_test = model.predict(test_X)
pred_train = np.argmax(pred_train, axis=1)
pred_test = np.argmax(pred_test, axis=1)

print(pred_train)
print(np.argmax(train_y, axis=1))
print(pred_test)
print(np.argmax(test_y, axis=1))

(acc.png: training/validation accuracy per epoch)

(loss.png: training/validation loss per epoch)
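
The accuracy and loss figures can be written to image files with plt.savefig; a minimal sketch (the file names are just examples, and savefig should be called before plt.show()):

#Write the current figure to a PNG file
plt.savefig('acc.png')  # likewise plt.savefig('loss.png') for the loss figure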

Data load

- Load the Iris data using scikit-learn

from sklearn.datasets import load_iris
#Read iris dataset
iris = load_iris()
#Check size
(iris.data.shape), (iris.target.shape)
# ((150, 4), (150,))

print(iris.feature_names) #Feature names
print(iris.data) #Explanatory variables (features)
print(iris.target_names) #Class names
print(iris.target) #Objective variable (class labels)

#Check the type of dataset
type(iris.data) 
type(iris.target)
# numpy.ndarray

- Explanatory variables (features)

['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
[[5.1 3.5 1.4 0.2]
 [4.9 3.  1.4 0.2]
 [4.7 3.2 1.3 0.2]
 [4.6 3.1 1.5 0.2]
 [5.  3.6 1.4 0.2]...

- Objective variable (class labels)

['setosa' 'versicolor' 'virginica']
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])

Data processing

- Convert the Iris objective variable to a one-hot encoding, which is easier to handle with neural networks

from tensorflow.keras.utils import to_categorical

# one-hot encoding
data_y = to_categorical(iris.target)
# 0 => [1, 0, 0]
# 1 => [0, 1, 0]
# 2 => [0, 0, 1]
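
A quick check of the resulting encoding (a small sketch continuing from the snippet above; indices 0, 50, and 100 are simply the first sample of each class):

print(data_y.shape)  # (150, 3)
print(data_y[0])     # [1. 0. 0.] -> class 0 (setosa)
print(data_y[50])    # [0. 1. 0.] -> class 1 (versicolor)
print(data_y[100])   # [0. 0. 1.] -> class 2 (virginica)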

- It can also be written with the arguments spelled out explicitly:

#Example: convert a class vector (integers) to a binary class matrix
to_categorical(iris.target,
               num_classes=None,
               dtype='float32')
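
For example, num_classes fixes the width of the one-hot vectors even when some classes are absent from the input (a small sketch):

to_categorical([0, 1], num_classes=3)
# array([[1., 0., 0.],
#        [0., 1., 0.]], dtype=float32)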

- See the following for details: https://www.tensorflow.org/api_docs/python/tf/keras/utils/to_categorical

- Next, split the dataset into training data and test data.

#Split into training data and test data
from sklearn.model_selection import train_test_split

data_X = iris.data
data_y = to_categorical(iris.target) # one-hot encoding

train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, test_size=0.3, random_state=0)
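
Because the split is random, the class proportions in the training and test sets can drift slightly. train_test_split's stratify option keeps them balanced; an optional sketch (not part of the original code):

#Stratified split: keep the 50/50/50 class balance in both subsets
train_X, test_X, train_y, test_y = train_test_split(
    data_X, data_y, test_size=0.3, random_state=0, stratify=iris.target)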

Network architecture

- Input layer: 4 variables
- Hidden layer: 100 units
- Output layer: 3 units
- Activation function: relu (softmax on the output layer)
- Loss function: categorical_crossentropy
- Optimizer: Adam

import tensorflow as tf

#Model building
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(4),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])
#Compiling the model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#Train the model
result = model.fit(train_X, train_y, batch_size=32, epochs=50, validation_data=(test_X, test_y), verbose=1)
Epoch 1/50
4/4 [==============================] - 0s 36ms/step - loss: 1.0097 - accuracy: 0.5810 - val_loss: 0.7336 - val_accuracy: 0.6222
Epoch 2/50
4/4 [==============================] - 0s 6ms/step - loss: 0.7082 - accuracy: 0.7048 - val_loss: 0.6553 - val_accuracy: 0.6000
Epoch 3/50
4/4 [==============================] - 0s 5ms/step - loss: 0.5494 - accuracy: 0.7905 - val_loss: 0.4738 - val_accuracy: 0.9111
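
The architecture and the number of trainable parameters can be verified with model.summary() after building the model; a quick sketch of what to expect:

model.summary()
#Dense(100): 4 inputs * 100 units + 100 biases = 500 parameters
#Dense(3):   100 units * 3 + 3 biases          = 303 parameters
#Total: 803 trainable parameters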

- To suppress the per-epoch output, set verbose=0 in model.fit.

Model evaluation

#Use model.evaluate to get the loss and accuracy of the trained model
train_score = model.evaluate(train_X, train_y)
test_score = model.evaluate(test_X, test_y)
print('Train loss:', train_score[0])
print('Train accuracy:', train_score[1])
print('Test loss:', test_score[0])
print('Test accuracy:', test_score[1])
4/4 [==============================] - 0s 2ms/step - loss: 0.0649 - accuracy: 0.9714
2/2 [==============================] - 0s 2ms/step - loss: 0.1223 - accuracy: 0.9778
Train loss: 0.06492960453033447
Train accuracy: 0.9714285731315613
Test loss: 0.12225695699453354
Test accuracy: 0.9777777791023254
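
evaluate returns the scores as a list in the order [loss, metrics...]. In recent TensorFlow versions (2.2 and later, as far as I know), return_dict=True returns a dictionary instead; a small sketch:

scores = model.evaluate(test_X, test_y, verbose=0, return_dict=True)
print(scores)              # {'loss': ..., 'accuracy': ...}
print(scores['accuracy'])
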
#Check the predictions (model.predict)
pred_train = model.predict(train_X)
pred_test = model.predict(test_X)
pred_train = np.argmax(pred_train, axis=1)
pred_test = np.argmax(pred_test, axis=1)

print(pred_train)
print(np.argmax(train_y, axis=1))
print(pred_test)
print(np.argmax(test_y, axis=1))
[1 2 2 2 2 1 2 1 1 2 1 2 2 1 2 1 0 2 1 1 1 1 2 0 0 2 1 0 0 1 0 2 1 0 1 2 1
 0 2 2 2 2 0 0 2 2 0 2 0 2 2 0 0 1 0 0 0 1 2 2 0 0 0 1 1 0 0 1 0 2 1 2 1 0
 2 0 2 0 0 2 0 2 1 1 1 2 2 1 1 0 1 2 2 0 1 1 1 1 0 0 0 2 1 2 0]
[1 2 2 2 2 1 2 1 1 2 2 2 2 1 2 1 0 2 1 1 1 1 2 0 0 2 1 0 0 1 0 2 1 0 1 2 1
 0 2 2 2 2 0 0 2 2 0 2 0 2 2 0 0 2 0 0 0 1 2 2 0 0 0 1 1 0 0 1 0 2 1 2 1 0
 2 0 2 0 0 2 0 2 1 1 1 2 2 1 1 0 1 2 2 0 1 1 1 1 0 0 0 2 1 2 0]
[2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0
 2 1 1 2 0 2 0 0]
[2 1 0 2 0 2 0 1 1 1 2 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0 2 1 0 2 2 1 0
 1 1 1 2 0 2 0 0]
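
Comparing the printed arrays by eye is tedious; a confusion matrix summarizes the mismatches. A small sketch using scikit-learn (not part of the original code):

from sklearn.metrics import accuracy_score, confusion_matrix

true_test = np.argmax(test_y, axis=1)
print(accuracy_score(true_test, pred_test))   # should match the evaluate() accuracy
print(confusion_matrix(true_test, pred_test)) # rows: true class, columns: predicted class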

Save model

#Save the model architecture as a JSON file
#Save the weights as HDF5

config = model.to_json()

with open('model.json','w') as file:
    file.write(config)

model.save_weights('weights.hdf5')
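
As an alternative to separate JSON and HDF5 files, tf.keras can also store the architecture, weights, and optimizer state in a single file with model.save; a brief sketch (the file name is just an example):

model.save('model.h5')                            # single-file save
# model = tf.keras.models.load_model('model.h5')  # reload later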

Model loading

with open('model.json','r') as file:
    model_json = file.read()
    model = tf.keras.models.model_from_json(model_json)

model.load_weights('weights.hdf5')

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

train_score = model.evaluate(train_X, train_y)
test_score = model.evaluate(test_X, test_y)
print('Train loss:', train_score[0])
print('Train accuracy:', train_score[1])
print('Test loss:', test_score[0])
print('Test accuracy:', test_score[1])
4/4 [==============================] - 0s 2ms/step - loss: 0.0649 - accuracy: 0.9714
2/2 [==============================] - 0s 2ms/step - loss: 0.1223 - accuracy: 0.9778
Train loss: 0.06492960453033447
Train accuracy: 0.9714285731315613
Test loss: 0.12225695699453354
Test accuracy: 0.9777777791023254

- For more information on tf.keras.models.model_from_json, see:

https://www.tensorflow.org/api_docs/python/tf/keras/models/model_from_json


Finally (to understand the contents of the data)

- Try using pandas
- Try using seaborn

import pandas as pd
import seaborn as sns

df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df.loc[df['target'] == 0, 'target'] = "setosa"
df.loc[df['target'] == 1, 'target'] = "versicolor"
df.loc[df['target'] == 2, 'target'] = "virginica"

df.head(2)
   sepal length (cm)  sepal width (cm)  petal length (cm)  petal width (cm)  target
0                5.1              3.5               1.4              0.2     setosa
1                4.9              3.0               1.4              0.2     setosa
sns.pairplot(df, hue="target")

(sns.png: seaborn pairplot of the four Iris features, colored by species)
