I have a U-Net model with pretrained weights from an autoencoder; the autoencoder was trained on an image dataset of 1400 images. I am trying to perform semantic segmentation with 1400 labelled images of a clinical dataset. The model performs well with an iou_score = 0.97 on my test image dataset, but when I test it on a random image outside my dataset, I get a very bad segmentation result. I don't understand the reason for it. Please review my code and suggest where I went wrong.
Training on my dataset & labels:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["SM_FRAMEWORK"] = "tf.keras"

import glob
import cv2
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import segmentation_models as sm

#########################################################################
# Load data for U-Net training.
#########################################################################

# Resizing images is optional, CNNs are ok with large images
SIZE_X = 256  # Resize images (height = X, width = Y)
SIZE_Y = 256

# Capture training image info as a list
train_images = []
directory_path = '/content/drive/MyDrive/Colab Notebooks/semantic/images/'
list_of_files = sorted(filter(os.path.isfile, glob.glob(directory_path + '*.jpg', recursive=True)))

for img_path in list_of_files:
    print(img_path)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (SIZE_Y, SIZE_X))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
    train_images.append(img)

# Convert list to array for machine learning processing
train_images = np.array(train_images)

# Capture mask/label info as a list
train_masks = []
labels_path = '/content/drive/MyDrive/Colab Notebooks/semantic/lables/'
list_of_labels = sorted(filter(os.path.isfile, glob.glob(labels_path + '*.png', recursive=True)))

for mask_path in list_of_labels:
    print(mask_path)
    mask = cv2.imread(mask_path, 0)
    mask = cv2.resize(mask, (SIZE_Y, SIZE_X))
    train_masks.append(mask)

# Convert list to array for machine learning processing
train_masks = np.array(train_masks)

# Normalize images
image_dataset = np.array(train_images) / 255.
# Do not normalize masks, just rescale to 0 to 1.
mask_dataset = np.expand_dims(np.array(train_masks), 3) / 255.

X_train, X_test, y_train, y_test = train_test_split(image_dataset, mask_dataset, test_size=0.20, random_state=0)

# Load U-Net model and load pretrained weights
from models import build_autoencoder, build_encoder, build_unet
from tensorflow.keras.optimizers import Adam

input_shape = (256, 256, 3)
pre_trained_unet_model = build_unet(input_shape)
pre_trained_unet_model.load_weights('/content/drive/MyDrive/Colab Notebooks/semantic/unet_clinical_model_weights.h5')
pre_trained_unet_model_weights = pre_trained_unet_model.get_weights()[0][1]

pretrained_encoder_wts = np.load('/content/drive/MyDrive/Colab Notebooks/semantic/pretrained_clinical_encoder-weights_300e.npy')

# Sanity check: compare the loaded U-Net weights against the saved encoder weights
if np.array_equal(pre_trained_unet_model_weights, pretrained_encoder_wts):
    print("Both weights are identical")
else:
    print("Something wrong, weights are different")

pre_trained_unet_model.compile('Adam', loss=sm.losses.binary_focal_jaccard_loss,
                               metrics=[sm.metrics.iou_score])

####################################################################
# Train the model
batch_size = 16
pre_trained_unet_model_history = pre_trained_unet_model.fit(X_train, y_train,
                                                            verbose=1,
                                                            batch_size=batch_size,
                                                            validation_data=(X_test, y_test),
                                                            shuffle=False,
                                                            epochs=300)
pre_trained_unet_model.save('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5')
Testing my model:
from keras.models import load_model
import random

pre_trained_unet_model = load_model('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5', compile=False)
my_model = pre_trained_unet_model

test_img_number = random.randint(0, X_test.shape[0] - 1)
#test_img_number = 119
test_img = X_test[test_img_number]
ground_truth = y_test[test_img_number]
test_img_input = np.expand_dims(test_img, 0)
prediction = (my_model.predict(test_img_input)[0, :, :, 0] > 0.5).astype(np.uint8)

plt.figure(figsize=(16, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img, cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth[:, :, 0], cmap='gray')
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(prediction, cmap='gray')
plt.show()
When I test the same model on a random clinical ulcer image, I get a very bad segmentation result. Here is the code for testing the model on a random image:
from keras.models import load_model
from keras.preprocessing import image
import segmentation_models as sm
import numpy as np
import matplotlib.pyplot as plt

# Load the model
model = load_model('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5', compile=False)
model.compile(loss=sm.losses.binary_focal_jaccard_loss, optimizer='Adam', metrics=[sm.metrics.iou_score])

test_image = image.load_img('/content/drive/MyDrive/Colab Notebooks/semantic/images/Foot ulcer 3-3.jpg', target_size=(256, 256))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)

result = model.predict(test_image)
result_img = result.reshape(256, 256)
plt.imshow(result_img, cmap='gray')
Kindly let me know if there is an error in my testing procedure.
Answer
Before training and validating, you normalize the data at this line:
image_dataset = np.array(train_images) / 255.
So you must do the same while testing:
from keras.models import load_model
from keras.preprocessing import image
import segmentation_models as sm
import numpy as np
import matplotlib.pyplot as plt

# Load the model
model = load_model('/content/drive/MyDrive/Colab Notebooks/semantic/pre_trained_unet_model_300epochs.h5', compile=False)
model.compile(loss=sm.losses.binary_focal_jaccard_loss, optimizer='Adam', metrics=[sm.metrics.iou_score])

test_image = image.load_img('/content/drive/MyDrive/Colab Notebooks/semantic/images/Foot ulcer 3-3.jpg', target_size=(256, 256))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0) / 255.0  # normalize exactly as during training

result = model.predict(test_image)
result_img = result.reshape(256, 256)
plt.imshow(result_img, cmap='gray')
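More generally, every preprocessing step applied during training (resizing, channel order, scaling to [0, 1]) has to be applied identically at inference time. Below is a minimal sketch of a shared helper that mirrors the training pipeline in your first block; the function name preprocess_for_unet and the 0.5 threshold in the usage note are my own choices for illustration, not part of your original code:

import cv2
import numpy as np

def preprocess_for_unet(img_path, size=256):
    """Load an image and apply the same preprocessing used during training:
    resize to (size, size), convert BGR -> RGB, scale to [0, 1], add a batch dim."""
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (size, size))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(np.float32) / 255.0
    return np.expand_dims(img, axis=0)

# Usage, assuming `model` is the loaded U-Net from above:
# test_image = preprocess_for_unet('/content/drive/MyDrive/Colab Notebooks/semantic/images/Foot ulcer 3-3.jpg')
# mask = (model.predict(test_image)[0, :, :, 0] > 0.5).astype(np.uint8)

Note that your in-dataset test code also thresholds the prediction at 0.5 before plotting; doing the same for the external image makes the two results directly comparable.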