import os

import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import tensorflow as tf
from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.preprocessing.image import load_img, img_to_array
# load the dataset of normal ("good") images
print("[INFO] loading images...")
imagePaths = list(paths.list_images("apples_test"))
data = []
labels = []

# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the directory name
    label = imagePath.split(os.path.sep)[-2]

    # load the input image, resize it to 32x32, and scale pixel values
    # to [0, 1] so they match the sigmoid output range of the decoder
    image = load_img(imagePath, target_size=(32, 32))
    image = img_to_array(image) / 255.0

    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)

# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
# perform one-hot encoding on the labels (used only to stratify the split below)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
print(labels.shape)
# partition the data into training and testing splits using 85% of
# the data for training and the remaining 15% for testing
(x_train, x_test, trainY, testY) = train_test_split(data, labels,
    test_size=0.15, stratify=labels, random_state=42)

x_train = np.reshape(x_train, (len(x_train), 32, 32, 3))
x_test = np.reshape(x_test, (len(x_test), 32, 32, 3))
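# sanity check of the split sizes (restores the shape print that was
# commented out at the top of the original gist)
print("train shape:", x_train.shape, "test shape:", x_test.shape)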
# Create the Encoder and Decoder: the input is a 32x32 RGB image
inputs = tf.keras.Input(shape=(32, 32, 3), name='input_layer')

# Conv Block 1 -> BatchNorm -> LeakyReLU
encoded = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1, padding='same', name='conv_1')(inputs)
encoded = tf.keras.layers.BatchNormalization(name='batchnorm_1')(encoded)
encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_1')(encoded)

# Conv Block 2 -> BatchNorm -> LeakyReLU (downsample to 16x16)
encoded = tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=2, padding='same', name='conv_2')(encoded)
encoded = tf.keras.layers.BatchNormalization(name='batchnorm_2')(encoded)
encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_2')(encoded)

# Conv Block 3 -> BatchNorm -> LeakyReLU (downsample to 8x8)
encoded = tf.keras.layers.Conv2D(64, kernel_size=(3, 3), strides=2, padding='same', name='conv_3')(encoded)
encoded = tf.keras.layers.BatchNormalization(name='batchnorm_3')(encoded)
encoded = tf.keras.layers.LeakyReLU(name='leaky_relu_3')(encoded)

# Decoder
# DeConv Block 1 -> BatchNorm -> LeakyReLU
decoded = tf.keras.layers.Conv2DTranspose(64, (3, 3), strides=1, padding='same', name='conv_transpose_1')(encoded)
decoded = tf.keras.layers.BatchNormalization(name='batchnorm_4')(decoded)
decoded = tf.keras.layers.LeakyReLU(name='leaky_relu_4')(decoded)

# DeConv Block 2 -> BatchNorm -> LeakyReLU (upsample to 16x16)
decoded = tf.keras.layers.Conv2DTranspose(64, (3, 3), strides=2, padding='same', name='conv_transpose_2')(decoded)
decoded = tf.keras.layers.BatchNormalization(name='batchnorm_5')(decoded)
decoded = tf.keras.layers.LeakyReLU(name='leaky_relu_5')(decoded)

# DeConv Block 3 -> BatchNorm -> LeakyReLU (upsample to 32x32)
decoded = tf.keras.layers.Conv2DTranspose(32, (3, 3), strides=2, padding='same', name='conv_transpose_3')(decoded)
decoded = tf.keras.layers.BatchNormalization(name='batchnorm_6')(decoded)
decoded = tf.keras.layers.LeakyReLU(name='leaky_relu_6')(decoded)

# output: sigmoid keeps reconstructed pixel values in [0, 1]
outputs = tf.keras.layers.Conv2DTranspose(3, (3, 3), strides=1, padding='same', activation='sigmoid', name='conv_transpose_4')(decoded)
# SSIM loss: 1 - mean structural similarity; max_val=1.0 matches the
# [0, 1] pixel range of the inputs and of the sigmoid output
def SSIMLoss(y_true, y_pred):
    return 1 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 1.0))

autoencoder = tf.keras.Model(inputs, outputs)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)
autoencoder.compile(optimizer=optimizer, loss=SSIMLoss)
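# quick smoke test of the loss (an added check, not part of the original
# gist): the SSIM of an image with itself is 1.0, so the loss on identical
# inputs should be ~0.0
_dummy = tf.random.uniform((1, 32, 32, 3))
print("SSIM loss on identical images: {:.3f}".format(float(SSIMLoss(_dummy, _dummy))))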
hist = autoencoder.fit(x_train, x_train,
                       epochs=100,
                       batch_size=128,
                       shuffle=True,
                       validation_data=(x_test, x_test))
# visualize original test images against their reconstructions
decoded_imgs = autoencoder.predict(x_test)

n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n + 1):
    # display original
    ax = plt.subplot(2, n, i)
    plt.imshow(x_test[i])
    plt.title("Original")
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + n)
    plt.imshow(decoded_imgs[i])
    plt.title("Reconstructed")
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
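# a minimal sketch (not part of the original gist): turn the reconstruction
# error into an anomaly score by computing the per-image SSIM loss on the
# held-out good images, then use mean + 3*std as a detection threshold
per_image_loss = 1.0 - tf.image.ssim(
    tf.convert_to_tensor(x_test), tf.convert_to_tensor(decoded_imgs), 1.0).numpy()
threshold = per_image_loss.mean() + 3.0 * per_image_loss.std()
print("anomaly threshold (mean + 3*std): {:.3f}".format(threshold))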
# use CIFAR-10 test images as anomalous inputs the autoencoder has never seen
from tensorflow.keras.datasets import cifar10

(trainX, trainy), (testX, testy) = cifar10.load_data()
testX = testX.astype("float32") / 255.0  # same [0, 1] scaling as the training data

n = 6  # how many good and anomalous images we will display
decoded_cifar_imgs = autoencoder.predict(testX)

plt.figure(figsize=(20, 14), dpi=100)
plt.subplots_adjust(wspace=0.1, hspace=0.07)
plt_a = 1
for i in range(n):
    # original good test image vs itself (baseline, SSIM loss is 0)
    ax = plt.subplot(3, n, plt_a)
    plt.imshow(x_test[i])
    ax.get_xaxis().set_visible(True)
    ax.get_yaxis().set_visible(False)
    value_a = SSIMLoss(x_test[i], x_test[i])
    ax.set_title("Original Image")
    ax.set_xlabel('SSIM Loss value: {:.3f}'.format(float(value_a)))

    # reconstruction of a good image vs the original test image
    ax = plt.subplot(3, n, plt_a + n)
    plt.imshow(decoded_imgs[i])
    ax.get_xaxis().set_visible(True)
    ax.get_yaxis().set_visible(False)
    value_a = SSIMLoss(decoded_imgs[i], x_test[i])
    ax.set_title("Reconstructed Image")
    ax.set_xlabel('SSIM Loss value: {:.3f}'.format(float(value_a)))

    # reconstruction of an anomalous image vs the anomalous input; the loss
    # should be noticeably higher than for the good images
    ax = plt.subplot(3, n, plt_a + 2 * n)
    plt.imshow(decoded_cifar_imgs[i])
    ax.get_xaxis().set_visible(True)
    ax.get_yaxis().set_visible(False)
    value = SSIMLoss(decoded_cifar_imgs[i], testX[i])
    ax.set_title("Anomalous Image")
    ax.set_xlabel('SSIM Loss value: {:.3f}'.format(float(value)))

    plt_a += 1
plt.show()
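# hedged follow-up sketch (not in the original gist): flag CIFAR images whose
# per-image reconstruction loss exceeds the threshold computed on the good
# validation images above
anom_loss = 1.0 - tf.image.ssim(
    tf.convert_to_tensor(testX), tf.convert_to_tensor(decoded_cifar_imgs), 1.0).numpy()
print("fraction of CIFAR images flagged as anomalous: {:.1%}".format(
    (anom_loss > threshold).mean()))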