spacenet_segnet.py (forked from rdelassus/spacenet_segnet.py)
A SegNet-like architecture for building detection on the SpaceNet dataset.
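The script below uses the Keras 1.x / Python 2 API (Convolution2D with positional kernel sizes, border_mode, itertools.izip, the print statement). Judging from the sys.argv[1] read and the two flow_from_directory calls, it is presumably run as sketched here; the 3band/ and labels/ directory names come from the code, the rest is an assumption:

# Presumed invocation and data layout (inferred from the code below):
#
#   python spacenet_segnet.py /path/to/spacenet_data
#
# /path/to/spacenet_data/
#     3band/<subdir>/    RGB tiles (flow_from_directory scans one level of subfolders)
#     labels/<subdir>/   single-channel building masks aligned with the tiles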
#from __future__ import absolute_import
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Layer, Dense, Dropout, Activation, Flatten, Reshape, Merge, Permute
from keras.layers import ZeroPadding2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
import sys
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#from preprocessing.visualize_prepro import shiftedColorMap
import itertools
import tensorflow as tf
path = sys.argv[1]
# input image dimensions
img_rows, img_cols = 400, 400
# output image dimensions
label_rows, label_cols = 400, 400
with tf.device('/gpu:1'):
    # we create two instances with the same arguments
    img_data_gen_args = dict(
        # featurewise_center=True,
        # featurewise_std_normalization=True,
        rescale=1. / 255,
        rotation_range=90.,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.2,
        fill_mode="constant",
        cval=0
    )
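    # the mask generator below repeats the geometric augmentations, but drops the
    # 1/255 rescaling and fills borders exposed by rotation/shift with 1 instead of 0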
    label_data_gen_args = dict(
        rotation_range=90.,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.2,
        fill_mode="constant",
        cval=1
    )
    image_datagen = ImageDataGenerator(**img_data_gen_args)
    mask_datagen = ImageDataGenerator(**label_data_gen_args)

    # Provide the same seed and keyword arguments to the fit and flow methods
    seed = 1
    # image_datagen.fit(images, augment=True, seed=seed)
    # mask_datagen.fit(masks, augment=True, seed=seed)
    image_generator = image_datagen.flow_from_directory(
        os.path.join(path, '3band/'),
        target_size=(img_rows, img_cols),
        class_mode=None,
        batch_size=8,
        shuffle=False,
        seed=seed)
    mask_generator = mask_datagen.flow_from_directory(
        os.path.join(path, 'labels'),
        target_size=(label_rows, label_cols),
        class_mode=None,
        batch_size=8,
        shuffle=False,
        color_mode='grayscale',
        seed=seed)
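    # shuffle=False plus the shared seed keeps every augmented image batch aligned
    # with its corresponding mask batch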
    # combine generators into one which yields image and masks
    train_generator = itertools.izip(image_generator, mask_generator)

    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    model = Sequential()
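    # a bare Layer as the first element only declares the 400x400x3 input shape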
    model.add(Layer(input_shape=(img_rows, img_cols, 3)))

    # encoding layers
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
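    # the pad=1 / 3x3 'valid' convolutions preserve spatial size, so three 2x2
    # poolings reduce the 400x400 input to a 50x50x512 encoding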
    # decoding layers
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Convolution2D(1, 1, 1, border_mode='valid'))
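    # three 2x2 upsamplings bring the 50x50 encoding back to 400x400, and the 1x1
    # convolution above collapses the 64 feature maps to a single score per pixel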
    print model.output_shape
    model.add(Reshape((label_rows * label_cols,)))
    model.add(Activation('sigmoid'))
    model.add(Reshape((label_rows, label_cols, 1)))
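    # the element-wise sigmoid turns each pixel score into a building probability;
    # the final Reshape restores (400, 400, 1) so the output matches the mask batches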
    model.compile(loss="binary_crossentropy", optimizer='rmsprop',
                  metrics=['binary_accuracy'])
    model.summary()

    checkpointer = ModelCheckpoint(filepath="weights.hdf5", verbose=1, save_best_only=False)
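    # save_best_only=False with a fixed filepath, so weights.hdf5 is simply
    # overwritten at the end of every epoch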
    model.fit_generator(
        train_generator,
        samples_per_epoch=1000,
        nb_epoch=20,
        callbacks=[checkpointer])

    model.save('spacenetmodel2.h5')
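# A minimal inference sketch: load the saved model and predict a building mask for
# one tile. Assumes 'spacenetmodel2.h5' from the run above; the tile path is a
# placeholder. Inputs are rescaled by 1/255 to match the training generator.
from keras.models import load_model

trained = load_model('spacenetmodel2.h5')
tile = img_to_array(load_img('/path/to/some_tile.png', target_size=(img_rows, img_cols))) / 255.
probs = trained.predict(tile[np.newaxis, ...])[0]        # shape (400, 400, 1)
building_mask = (probs[:, :, 0] > 0.5).astype(np.uint8)  # threshold the per-pixel probabilities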