Commit 10abee85 by Gencer

Initial commit

# -*- coding: utf-8 -*-
#
# BigEarthNet class to create tf.data.Dataset based on the TFRecord files.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
BAND_STATS = {
    'mean': {
        'B01': 340.76769064,
        'B02': 429.9430203,
        'B03': 614.21682446,
        'B04': 590.23569706,
        'B05': 950.68368468,
        'B06': 1792.46290469,
        'B07': 2075.46795189,
        'B08': 2218.94553375,
        'B8A': 2266.46036911,
        'B09': 2246.0605464,
        'B11': 1594.42694882,
        'B12': 1009.32729131
    },
    'std': {
        'B01': 554.81258967,
        'B02': 572.41639287,
        'B03': 582.87945694,
        'B04': 675.88746967,
        'B05': 729.89827633,
        'B06': 1096.01480586,
        'B07': 1273.45393088,
        'B08': 1365.45589904,
        'B8A': 1356.13789355,
        'B09': 1302.3292881,
        'B11': 1079.19066363,
        'B12': 818.86747235
    }
}
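# A minimal sketch of how these statistics can be used to standardize a band
# before it is fed to a network (the normalization itself lives in
# models/main_model.py); `band` is a hypothetical [120, 120] float array
# parsed from a TFRecord:
#
#   band_standardized = (band - BAND_STATS['mean']['B04']) / BAND_STATS['std']['B04']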
class BigEarthNet:
    def __init__(self, TFRecord_paths, batch_size, nb_epoch, shuffle_buffer_size):
        dataset = tf.data.TFRecordDataset(TFRecord_paths)
        if shuffle_buffer_size > 0:
            dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
        dataset = dataset.repeat(nb_epoch)
        dataset = dataset.map(
            self.parse_function, num_parallel_calls=10)
        dataset = dataset.batch(batch_size, drop_remainder=False)
        self.dataset = dataset.prefetch(10)
        # One-shot iterator over the shuffled, batched, prefetched patches.
        self.batch_iterator = self.dataset.make_one_shot_iterator()

    def parse_function(self, example_proto):
        # Bands are stored flattened: 10m bands are 120x120, 20m bands 60x60,
        # and 60m bands 20x20 pixels per patch.
        parsed_features = tf.parse_single_example(
            example_proto,
            {
                'B01': tf.FixedLenFeature([20*20], tf.int64),
                'B02': tf.FixedLenFeature([120*120], tf.int64),
                'B03': tf.FixedLenFeature([120*120], tf.int64),
                'B04': tf.FixedLenFeature([120*120], tf.int64),
                'B05': tf.FixedLenFeature([60*60], tf.int64),
                'B06': tf.FixedLenFeature([60*60], tf.int64),
                'B07': tf.FixedLenFeature([60*60], tf.int64),
                'B08': tf.FixedLenFeature([120*120], tf.int64),
                'B8A': tf.FixedLenFeature([60*60], tf.int64),
                'B09': tf.FixedLenFeature([20*20], tf.int64),
                'B11': tf.FixedLenFeature([60*60], tf.int64),
                'B12': tf.FixedLenFeature([60*60], tf.int64),
                'original_labels': tf.VarLenFeature(dtype=tf.string),
                'original_labels_multi_hot': tf.FixedLenFeature([43], tf.int64),
                'patch_name': tf.VarLenFeature(dtype=tf.string)
            }
        )
        return {
            'B01': tf.reshape(parsed_features['B01'], [20, 20]),
            'B02': tf.reshape(parsed_features['B02'], [120, 120]),
            'B03': tf.reshape(parsed_features['B03'], [120, 120]),
            'B04': tf.reshape(parsed_features['B04'], [120, 120]),
            'B05': tf.reshape(parsed_features['B05'], [60, 60]),
            'B06': tf.reshape(parsed_features['B06'], [60, 60]),
            'B07': tf.reshape(parsed_features['B07'], [60, 60]),
            'B08': tf.reshape(parsed_features['B08'], [120, 120]),
            'B8A': tf.reshape(parsed_features['B8A'], [60, 60]),
            'B09': tf.reshape(parsed_features['B09'], [20, 20]),
            'B11': tf.reshape(parsed_features['B11'], [60, 60]),
            'B12': tf.reshape(parsed_features['B12'], [60, 60]),
            'original_labels_multi_hot': parsed_features['original_labels_multi_hot'],
            'original_labels': parsed_features['original_labels'],
            'patch_name': parsed_features['patch_name']
        }
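# Example usage (TF 1.x), mirroring how eval.py consumes the iterator; the
# TFRecord path below is hypothetical:
#
#   iterator = BigEarthNet(['TFRecord_files/test.tfrecord'], 32, 1, 0).batch_iterator
#   next_batch = iterator.get_next()
#   with tf.Session() as sess:
#       batch_dict = sess.run(next_batch)
#       print(batch_dict['B04'].shape)  # (32, 120, 120)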
# BigEarthNet Deep Learning Models With TensorFlow
This repository contains code to use deep learning models pre-trained on the [BigEarthNet archive](http://bigearth.net/) with TensorFlow, to train new models, and to evaluate pre-trained models. It is recommended to first check the [BigEarthNet Deep Learning Models repository](https://gitlab.tu-berlin.de/rsim/bigearthnet-models).
## Prerequisites
* The definitions of ResNet and VGG models are based on their TensorFlow-Slim implementations. Thus, you need to first download the `nets` folder of [TensorFlow-Slim repository](https://github.com/tensorflow/models/tree/master/research/slim/nets) to the root folder.
* The `prep_splits.py` script from [here](https://gitlab.tu-berlin.de/rsim/bigearthnet-models/blob/master/prep_splits.py) generates `TFRecord` files for the train, validation, and test sets from the BigEarthNet archive. To train or evaluate any model, the required TFRecord files must be prepared first.
* The config file used to train each model is given under the `configs` folder. To use these config files, you need to download the pre-trained model weights and move them to the folders whose paths are written in the corresponding JSON file.
* The TensorFlow package must be installed. All code was tested with Python 2.7, TensorFlow 1.3, and Ubuntu 16.04.
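For example, a matching TensorFlow version can be installed in a Python 2.7 environment with `pip install tensorflow==1.3.0` (or `tensorflow-gpu==1.3.0` for GPU support).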
## Training
The script `train.py` expects the path of a `JSON` configuration file as a command line argument. This file contains the following parameters:
* `model_name`: The name of the Python code containing the corresponding deep learning model. The code must be located under the `models` directory. The model class is loaded dynamically based on the `model_name` parameter: `model = importlib.import_module('models.' + args['model_name']).DNN_model(args['label_type'])`
* `batch_size`: Batch size used during training
* `nb_epoch`: The number of epochs for the training
* `learning_rate`: The initial learning rate
* `out_dir`: The path where all log files and checkpoints will be saved.
* `save_checkpoint_after_iteration`: The iteration after which checkpoint saving starts; no checkpoints are saved before it. Set it to zero to save checkpoints from the beginning.
* `save_checkpoint_per_iteration`: A checkpoint is written every `save_checkpoint_per_iteration` iterations, i.e., whenever `iteration_index % save_checkpoint_per_iteration == 0`.
* `tr_tf_record_files`: An array containing `TFRecord` files for training.
* `val_tf_record_files`: An array containing `TFRecord` files for validation (currently unused).
* `label_type`: The type of the labels used, either `original` or `compact`. Find more info about this [here](https://gitlab.tu-berlin.de/rsim/bigearthnet-models/blob/master/README.md).
* `fine_tune`: A flag indicating whether training should continue from the existing checkpoint whose path is defined by `model_file`.
* `model_file`: The base name of a pre-trained model snapshot (i.e., checkpoint).
* `shuffle_buffer_size`: The number of elements that will be shuffled at the beginning of each epoch. A large shuffle buffer is not recommended if you do not have enough memory.
* `training_size`: The size of the training set. If you are using the training set suggested [here](https://gitlab.tu-berlin.de/rsim/bigearthnet-models/), it is already set.
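Note that the final settings are assembled by loading `configs/base.json` first and then overriding it with the model-specific config file, mirroring the `__main__` block of `eval.py` below; a minimal sketch (the config name is just an example):

```
import json

# Shared defaults first, then the model-specific overrides.
with open('configs/base.json') as f:
    args = json.load(f)
with open('configs/ResNet50.json') as f:
    args.update(json.load(f))
```

Training can then be started with, e.g., `python train.py configs/ResNet50.json`.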
## Evaluation
The script `eval.py` expects a `JSON` configuration file. The required parameters are as follows:
* `model_name`: The name of the Python code containing the corresponding deep learning model. The code must be located under the `models` directory. The model class is loaded dynamically based on the `model_name` parameter: `model = importlib.import_module('models.' + args['model_name']).DNN_model(args['label_type'])`
* `batch_size`: Batch size used during evaluation
* `out_dir`: The path where all log files and evaluation results will be saved.
* `test_tf_record_files`: An array containing `TFRecord` files for evaluation.
* `label_type`: The type of the labels used, either `original` or `compact`. Find more info about this [here](https://gitlab.tu-berlin.de/rsim/bigearthnet-models/blob/master/README.md).
* `model_file`: The base name of a pre-trained model snapshot (i.e., checkpoint).
* `test_size`: The size of the test set. If you are using the test set suggested [here](https://gitlab.tu-berlin.de/rsim/bigearthnet-models/), it is already set.
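Evaluation can then be run with, e.g., `python eval.py configs/ResNet50.json`; the metrics are printed and additionally written to `eval_result.json` under `out_dir` (see `eval.py` below).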
## Bugs and Requests
If you face a bug or have a feature request, please create an issue at https://gitlab.tubit.tu-berlin.de/rsim/bigearthnet-models/issues.
## Authors
**Gencer Sümbül**
http://www.user.tu-berlin.de/gencersumbul/
**Tristan Kreuziger**
https://www.rsim.tu-berlin.de/menue/team/tristan_kreuziger/
## License
The code in this repository to facilitate the use of the BigEarthNet archive is licensed under the **MIT License**:
```
MIT License
Copyright (c) 2019 The BigEarthNet Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
{
    "model_name": "K-BranchCNN",
    "batch_size": 1000,
    "nb_epoch": 100,
    "learning_rate": 1e-3,
    "save_checkpoint_per_iteration": 300,
    "save_checkpoint_after_iteration": 3000,
    "out_dir": "model_weights/original_labels/K-BranchCNN",
    "model_file": "model_weights/original_labels/K-BranchCNN/K-BranchCNN_BigEarthNet_original_labels",
    "label_type": "original"
}
This folder contains configuration examples for the models included in the repository. The settings in `base.json` are always applied and then updated with the settings of the specified config file.
{
    "model_name": "ResNet101",
    "batch_size": 500,
    "nb_epoch": 100,
    "learning_rate": 1e-3,
    "save_checkpoint_per_iteration": 600,
    "save_checkpoint_after_iteration": 6000,
    "out_dir": "model_weights/original_labels/ResNet101",
    "model_file": "model_weights/original_labels/ResNet101/ResNet101_BigEarthNet_original_labels",
    "label_type": "original"
}
{
    "model_name": "ResNet152",
    "batch_size": 256,
    "nb_epoch": 100,
    "learning_rate": 1e-3,
    "save_checkpoint_per_iteration": 300,
    "save_checkpoint_after_iteration": 3000,
    "out_dir": "model_weights/original_labels/ResNet152",
    "model_file": "model_weights/original_labels/ResNet152/ResNet152_BigEarthNet_original_labels",
    "label_type": "original"
}
{
    "model_name": "ResNet50",
    "batch_size": 500,
    "nb_epoch": 100,
    "learning_rate": 1e-3,
    "save_checkpoint_per_iteration": 600,
    "save_checkpoint_after_iteration": 6000,
    "out_dir": "model_weights/original_labels/ResNet50",
    "model_file": "model_weights/original_labels/ResNet50/ResNet50_BigEarthNet_original_labels",
    "label_type": "original"
}
{
    "model_name": "VGG16",
    "batch_size": 1000,
    "nb_epoch": 100,
    "learning_rate": 1e-4,
    "save_checkpoint_per_iteration": 300,
    "save_checkpoint_after_iteration": 3000,
    "out_dir": "model_weights/original_labels/VGG16",
    "model_file": "model_weights/original_labels/VGG16/VGG16_BigEarthNet_original_labels",
    "label_type": "original"
}
{
    "model_name": "VGG19",
    "batch_size": 1000,
    "nb_epoch": 100,
    "learning_rate": 1e-4,
    "save_checkpoint_per_iteration": 300,
    "save_checkpoint_after_iteration": 3000,
    "out_dir": "model_weights/original_labels/VGG19",
    "model_file": "model_weights/original_labels/VGG19/VGG19_BigEarthNet_original_labels",
    "label_type": "original"
}
{
    "model_name": "ResNet50",
    "batch_size": 1000,
    "nb_epoch": 100,
    "learning_rate": 1e-3,
    "save_checkpoint_after_iteration": 0,
    "save_checkpoint_per_iteration": 1,
    "tr_tf_record_files": ["TFRecord_files/train.tfrecord"],
    "val_tf_record_files": ["TFRecord_files/val.tfrecord"],
    "test_tf_record_files": ["TFRecord_files/test.tfrecord"],
    "label_type": "original",
    "fine_tune": false,
    "model_file": "model_weights/original_labels/ResNet50/ResNet50_BigEarthNet_original_labels",
    "shuffle_buffer_size": 130000,
    "training_size": 269695,
    "test_size": 125866
}
# -*- coding: utf-8 -*-
#
# This script can be used to evaluate the performance of a deep learning model, pre-trained on the BigEarthNet.
#
# To run the code, you need to provide the JSON file that was used for training.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
# Usage: eval.py [CONFIG_FILE_PATH]
from __future__ import print_function
import numpy as np
import tensorflow as tf
import subprocess, time, os
import argparse
from BigEarthNet import BigEarthNet
from utils import get_metrics
import json
import importlib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def eval_model(args):
    with tf.Session() as sess:
        iterator = BigEarthNet(
            args['test_tf_record_files'],
            args['batch_size'],
            1,
            0
        ).batch_iterator
        nb_iteration = int(np.ceil(float(args['test_size']) / args['batch_size']))
        iterator_ins = iterator.get_next()
        model = importlib.import_module('models.' + args['model_name']).DNN_model(args['label_type'])
        model.create_network()
        variables_to_restore = tf.global_variables()
        metric_names, metric_means, metric_update_ops = get_metrics(model.multi_hot_label, model.predictions, model.probabilities)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model_saver = tf.train.Saver(max_to_keep=0, var_list=variables_to_restore)
        model_file = args['model_file']
        model_saver.restore(sess, model_file)
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(os.path.join(args['out_dir'], 'logs', 'test'), sess.graph)
        iteration_idx = 0
        progress_bar = tf.contrib.keras.utils.Progbar(target=nb_iteration)
        eval_res = {}
        while True:
            try:
                batch_dict = sess.run(iterator_ins)
                iteration_idx += 1
                progress_bar.update(iteration_idx)
            except tf.errors.OutOfRangeError:
                print()
                means = sess.run(metric_means[0])
                for idx, name in enumerate(metric_names[0]):
                    eval_res[name] = str(means[idx])
                    print(name, means[idx])
                break
            sess_res = sess.run([metric_update_ops, summary_op] + metric_means[1], feed_dict=model.feed_dict(
                batch_dict))
            summary_writer.add_summary(sess_res[1], iteration_idx)
            metric_means_res = sess_res[2:]
            for idx, name in enumerate(metric_names[1]):
                eval_res[name] = str(metric_means_res[idx])
                print(name, metric_means_res[idx])
        with open(os.path.join(args['out_dir'], 'eval_result.json'), 'wb') as f:
            json.dump(eval_res, f)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='test arguments')
    parser.add_argument('settings', help='json settings file')
    parser_args = parser.parse_args()
    with open('configs/base.json', 'rb') as f:
        args = json.load(f)
    with open(os.path.realpath(parser_args.settings), 'rb') as f:
        model_args = json.load(f)
    args.update(model_args)
    eval_model(args)
You can find links to the pre-trained model weights in the [BigEarthNet Deep Models repository](https://gitlab.tu-berlin.de/rsim/bigearthnet-models). After downloading the zip files, you can extract each one directly into this folder; the resulting layout is compatible with the existing configuration JSON files.
# -*- coding: utf-8 -*-
#
# K-Branch CNN is proposed as the first module of the following system without an attention mechanism:
#
# G. Sumbul, B. Demir, "A Novel Multi-Attention Driven System for Multi-Label Remote Sensing Image Classification",
# IEEE International Conference on Geoscience and Remote Sensing Symposium, Yokohama, Japan, 2019.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from models.main_model import Model
SEED = 42
class DNN_model(Model):
    def __init__(self, label_type):
        Model.__init__(self, label_type)
        self.feature_size = 128
        self.nb_bands_10m = 4
        self.nb_bands_20m = 6
        self.nb_bands_60m = 2

    def fully_connected_block(self, inputs, nb_neurons, is_training, name):
        with tf.variable_scope(name):
            fully_connected_res = tf.layers.dense(
                inputs=inputs,
                units=nb_neurons,
                activation=None,
                use_bias=True,
                kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(seed=SEED),
                bias_initializer=tf.zeros_initializer(),
                kernel_regularizer=tf.contrib.layers.l2_regularizer(2e-5),
                bias_regularizer=None,
                activity_regularizer=None,
                trainable=True,
                name='fc_layer',
                reuse=None
            )
            batch_res = tf.layers.batch_normalization(inputs=fully_connected_res, name='batch_norm', training=is_training)
            return tf.nn.relu(features=batch_res, name='relu')

    def conv_block(self, inputs, nb_filter, filter_size, is_training, name):
        with tf.variable_scope(name):
            conv_res = tf.layers.conv2d(
                inputs=inputs,
                filters=nb_filter,
                kernel_size=filter_size,
                strides=(1, 1),
                padding='same',
                data_format='channels_last',
                dilation_rate=(1, 1),
                activation=None,
                use_bias=True,
                kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(seed=SEED),
                bias_initializer=tf.zeros_initializer(),
                kernel_regularizer=tf.contrib.layers.l2_regularizer(2e-5),
                bias_regularizer=None,
                activity_regularizer=None,
                trainable=True,
                name='conv_layer',
                reuse=None
            )
            batch_res = tf.layers.batch_normalization(inputs=conv_res, name='batch_norm', training=is_training)
            return tf.nn.relu(features=batch_res, name='relu')

    def pooling(self, inputs, name):
        return tf.nn.max_pool(
            value=inputs,
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding="VALID",
            data_format='NHWC',
            name=name
        )

    def dropout(self, inputs, drop_rate, is_training, name):
        return tf.layers.dropout(
            inputs,
            rate=drop_rate,
            noise_shape=None,
            seed=SEED,
            training=is_training,
            name=name
        )

    def convert_image_to_uint8(self, img_batch):
        return tf.map_fn(
            lambda x: tf.image.convert_image_dtype(
                (tf.image.per_image_standardization(x) + 1.) / 2.,
                dtype=tf.uint8, saturate=True
            ),
            img_batch, dtype=tf.uint8
        )

    def branch_model_10m(self, inputs, is_training):
        with tf.variable_scope('CNN_10m_branch'):
            out = self.conv_block(inputs, 32, [5, 5], is_training, 'conv_block_0')
            out = self.pooling(out, 'max_pooling')
            out = self.dropout(out, 0.25, is_training, 'dropout_0')
            out = self.conv_block(out, 32, [5, 5], is_training, 'conv_block_1')
            out = self.pooling(out, 'max_pooling_1')
            out = self.dropout(out, 0.25, is_training, 'dropout_1')
            out = self.conv_block(out, 64, [3, 3], is_training, 'conv_block_2')
            out = self.dropout(out, 0.25, is_training, 'dropout_2')
            out = tf.contrib.layers.flatten(out)
            out = self.fully_connected_block(out, self.feature_size, is_training, 'fc_block_0')
            feature = self.dropout(out, 0.5, is_training, 'dropout_3')
        return feature

    def branch_model_20m(self, inputs, is_training):
        with tf.variable_scope('CNN_20m_branch'):
            out = self.conv_block(inputs, 32, [3, 3], is_training, 'conv_block_0')
            out = self.pooling(out, 'max_pooling_0')
            out = self.dropout(out, 0.25, is_training, 'dropout_0')
            out = self.conv_block(out, 32, [3, 3], is_training, 'conv_block_1')
            out = self.dropout(out, 0.25, is_training, 'dropout_1')
            out = self.conv_block(out, 64, [3, 3], is_training, 'conv_block_2')
            out = self.dropout(out, 0.25, is_training, 'dropout_2')
            out = tf.contrib.layers.flatten(out)
            out = self.fully_connected_block(out, self.feature_size, is_training, 'fc_block_0')
            feature = self.dropout(out, 0.5, is_training, 'dropout_3')
        return feature

    def create_network(self):
        branch_features = []
        for img_bands, nb_bands, branch_model in zip(
                [self.bands_10m, self.bands_20m],
                [self.nb_bands_10m, self.nb_bands_20m],
                [self.branch_model_10m, self.branch_model_20m]
        ):
            branch_features.append(tf.reshape(branch_model(img_bands, self.is_training), [-1, self.feature_size]))
        with tf.variable_scope('feature_fusion'):
            patches_concat_embed_ = tf.concat(branch_features, -1)
            patches_concat_embed_ = self.fully_connected_block(patches_concat_embed_, self.feature_size, self.is_training, 'fc_block_0')
            patches_concat_embed_ = self.dropout(patches_concat_embed_, 0.25, self.is_training, 'dropout_0')
        initializer = tf.contrib.layers.xavier_initializer(seed=SEED)
        with tf.variable_scope('classification'):
            res = self.dropout(patches_concat_embed_, 0.5, self.is_training, 'dropout_0')
            self.logits = tf.layers.dense(
                inputs=res,
                units=self.nb_class,
                activation=None,
                use_bias=True,
                kernel_initializer=initializer,
                bias_initializer=tf.zeros_initializer(),
                kernel_regularizer=None,
                bias_regularizer=None,
                activity_regularizer=None,
                trainable=True,
                name='fc_layer',
                reuse=None
            )
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
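# Example usage (TF 1.x), mirroring eval.py: the module is loaded by name
# (the file name contains a hyphen, so a plain import statement would not
# work and importlib is required), and the graph is built for the 43
# original BigEarthNet labels:
#
#   import importlib
#   model = importlib.import_module('models.K-BranchCNN').DNN_model('original')
#   model.create_network()
#   # model.logits, model.probabilities and model.predictions are now defined,
#   # and batches are fed through model.feed_dict(batch_dict).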
This folder contains implementations of several well-known architectures. Please note that to use the ResNet and VGG models, you need to download the `nets` folder of the TensorFlow-Slim image classification model library.
# -*- coding: utf-8 -*-
#
# ResNet101 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from nets.resnet_utils import resnet_arg_scope
from nets.resnet_v1 import resnet_v1_101
from models.main_model import Model
class DNN_model(Model):
    def create_network(self):
        with tf.contrib.slim.arg_scope(resnet_arg_scope()):
            logits, end_points = resnet_v1_101(
                self.img,
                num_classes=self.nb_class,
                is_training=self.is_training,
                global_pool=True,
                spatial_squeeze=True
            )
        self.logits = logits
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
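# Note: BigEarthNet is a multi-label problem, so the Slim logits are passed
# through an element-wise sigmoid rather than a softmax, and each class
# probability is thresholded independently at prediction_threshold (0.5).
# The same pattern is used by all Slim-based models below.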
# -*- coding: utf-8 -*-
#
# ResNet152 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from nets.resnet_utils import resnet_arg_scope
from nets.resnet_v1 import resnet_v1_152
from models.main_model import Model
class DNN_model(Model):
    def create_network(self):
        with tf.contrib.slim.arg_scope(resnet_arg_scope()):
            logits, end_points = resnet_v1_152(
                self.img,
                num_classes=self.nb_class,
                is_training=self.is_training,
                global_pool=True,
                spatial_squeeze=True
            )
        self.logits = logits
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
# -*- coding: utf-8 -*-
#
# ResNet50 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from nets.resnet_utils import resnet_arg_scope
from nets.resnet_v1 import resnet_v1_50
from models.main_model import Model
class DNN_model(Model):
    def create_network(self):
        with tf.contrib.slim.arg_scope(resnet_arg_scope()):
            logits, end_points = resnet_v1_50(
                self.img,
                num_classes=self.nb_class,
                is_training=self.is_training,
                global_pool=True,
                spatial_squeeze=True
            )
        self.logits = logits
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
# -*- coding: utf-8 -*-
#
# VGG16 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from nets.vgg import vgg_16, vgg_arg_scope
from models.main_model import Model
class DNN_model(Model):
    def create_network(self):
        with tf.contrib.slim.arg_scope(vgg_arg_scope()):
            logits, end_points = vgg_16(
                self.img,
                num_classes=self.nb_class,
                is_training=self.is_training,
                fc_conv_padding='SAME',
                global_pool=True
            )
        self.logits = logits
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
# -*- coding: utf-8 -*-
#
# VGG19 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import tensorflow as tf
from nets.vgg import vgg_19, vgg_arg_scope
from models.main_model import Model
class DNN_model(Model):
    def create_network(self):
        with tf.contrib.slim.arg_scope(vgg_arg_scope()):
            logits, end_points = vgg_19(
                self.img,
                num_classes=self.nb_class,
                is_training=self.is_training,
                fc_conv_padding='SAME',
                global_pool=True
            )
        self.logits = logits
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(self.probabilities >= self.prediction_threshold, tf.float32)
# -*- coding: utf-8 -*-
#
# Main model to define initial placeholders, feed dictionary, and image normalization.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: gencer.suembuel@tu-berlin.de
# Date: 23 Dec 2019
# Version: 1.0.1
import numpy as np
import tensorflow as tf
from nets.resnet_utils import resnet_arg_scope
from nets.resnet_v1 import resnet_v1_50
from BigEarthNet import BAND_STATS
from utils import sparse_to_dense
class Model:
    def __init__(self, label_type):
        self.label_type = label_type
        self.prediction_threshold = 0.5
        self.is_training = tf.placeholder(tf.bool, [])
        self.nb_class = 19 if label_type == 'compact' else 43
        self.B01 = tf.placeholder(tf.float32, [None, 20, 20], name='B01')
        self.B02 = tf.placeholder(tf.float32, [None, 120, 120], name='B02')
        self.B03 = tf.placeholder(tf.float32, [None, 120, 120], name='B03')
        self.B04 = tf.placeholder(tf.float32, [None, 120, 120], name='B04')
        self.B05 = tf.placeholder(tf.float32, [None, 60, 60], name='B05')
        self.B06 = tf.placeholder(tf.float32, [None, 60, 60], name='B06')
        self.B07 = tf.placeholder(tf.float32, [None, 60, 60], name='B07')
        self.B08 = tf.placeholder(tf.float32, [None, 120, 120], name='B08')
        self.B8A = tf.placeholder(tf.float32, [None, 60, 60], name='B8A')
        self.B09 = tf.placeholder(tf.float32, [None, 20, 20], name='B09')
        self.B11 = tf.placeholder(tf.float32, [None, 60, 60], name='B11')
        self.B12 = tf.placeholder(tf.float32, [None, 60, 60], name='B12')
        self.bands_10m = tf.stack([self.B04, self.B03, self.B02, self.B08], axis=3)
        self.bands_20m = tf.stack([self.B05, self.B06, self.B07, self.B8A, self.B11, self.B12], axis=3)
        self.bands_60m = tf.stack([self.B01, self.B09], axis=3)
        self.img = tf.concat(
            [
                self.bands_10m,
                tf.image.resize_bicubic(
                    self.bands_20m,
                    [120, 120]