Module delta.extensions.layers.efficientnet

An implementation of EfficientNet, adapted from the Keras Applications reference implementation and modified to remove the final classification layers (the network top).
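
A minimal usage sketch (the input shape and layer name are illustrative; `DeltaEfficientNet` and its defaults come from the source below):

from delta.extensions.layers.efficientnet import DeltaEfficientNet

# Build a headless EfficientNet backbone for 128x128 8-band imagery.
# Weights are randomly initialized (DeltaEfficientNet passes weights=None).
model = DeltaEfficientNet((128, 128, 8), name='backbone')
model.summary()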

# Copyright © 2020, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The DELTA (Deep Earth Learning, Tools, and Analysis) platform is
# licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#pylint:disable=dangerous-default-value, too-many-arguments
"""
An implementation of EfficientNet. This is
taken from tensorflow but modified to remove initial layers.
"""
# https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from copy import deepcopy
import math

import tensorflow
#from tensorflow.keras.applications import imagenet_utils

from delta.config.extensions import register_layer

backend = tensorflow.keras.backend
layers = tensorflow.keras.layers
models = tensorflow.keras.models
keras_utils = tensorflow.keras.utils

#BASE_WEIGHTS_PATH = (
#    'https://github.com/Callidior/keras-applications/'
#    'releases/download/efficientnet/')
#WEIGHTS_HASHES = {
#    'b0': ('e9e877068bd0af75e0a36691e03c072c',
#           '345255ed8048c2f22c793070a9c1a130'),
#    'b1': ('8f83b9aecab222a9a2480219843049a1',
#           'b20160ab7b79b7a92897fcb33d52cc61'),
#    'b2': ('b6185fdcd190285d516936c09dceeaa4',
#           'c6e46333e8cddfa702f4d8b8b6340d70'),
#    'b3': ('b2db0f8aac7c553657abb2cb46dcbfbb',
#           'e0cf8654fad9d3625190e30d70d0c17d'),
#    'b4': ('ab314d28135fe552e2f9312b31da6926',
#           'b46702e4754d2022d62897e0618edc7b'),
#    'b5': ('8d60b903aff50b09c6acf8eaba098e09',
#           '0a839ac36e46552a881f2975aaab442f'),
#    'b6': ('a967457886eac4f5ab44139bdd827920',
#           '375a35c17ef70d46f9c664b03b4437f2'),
#    'b7': ('e964fd6e26e9a4c144bcb811f2a10f20',
#           'd55674cc46b805f4382d18bc08ed43c1')
#}

DEFAULT_BLOCKS_ARGS = [
    {'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16,
     'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
    {'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24,
     'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
    {'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40,
     'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
    {'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80,
     'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
    {'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112,
     'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
    {'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192,
     'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
    {'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320,
     'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}
]

CONV_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 2.0,
        'mode': 'fan_out',
        # EfficientNet actually uses an untruncated normal distribution for
        # initializing conv layers, but keras.initializers.VarianceScaling use
        # a truncated distribution.
        # We decided against a custom initializer for better serializability.
        'distribution': 'normal'
    }
}

DENSE_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 1. / 3.,
        'mode': 'fan_out',
        'distribution': 'uniform'
    }
}

def correct_pad(inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.
    # Arguments
        inputs: Input tensor.
        kernel_size: An integer or tuple/list of 2 integers.
    # Returns
        A tuple.
    """
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]

    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)

    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]))

def swish(x):
    """Swish activation function.

    # Arguments
        x: Input tensor.

    # Returns
        The Swish activation: `x * sigmoid(x)`.

    # References
        [Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
    """
    if backend.backend() == 'tensorflow':
        # The native TF implementation has a more
        # memory-efficient gradient implementation.
        return tensorflow.nn.swish(x)

    return x * backend.sigmoid(x)


def block(inputs, activation_fn=swish, drop_rate=0., name='',
          filters_in=32, filters_out=16, kernel_size=3, strides=1,
          expand_ratio=1, se_ratio=0., id_skip=True):
    """A mobile inverted residual block.

    # Arguments
        inputs: input tensor.
        activation_fn: activation function.
        drop_rate: float between 0 and 1, fraction of the input units to drop.
        name: string, block label.
        filters_in: integer, the number of input filters.
        filters_out: integer, the number of output filters.
        kernel_size: integer, the dimension of the convolution window.
        strides: integer, the stride of the convolution.
        expand_ratio: integer, scaling coefficient for the input filters.
        se_ratio: float between 0 and 1, fraction to squeeze the input filters.
        id_skip: boolean.

    # Returns
        output tensor for the block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    # Expansion phase
    filters = filters_in * expand_ratio
    if expand_ratio != 1:
        x = layers.Conv2D(filters, 1,
                          padding='same',
                          use_bias=False,
                          kernel_initializer=CONV_KERNEL_INITIALIZER,
                          name=name + 'expand_conv')(inputs)
        x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
        x = layers.Activation(activation_fn, name=name + 'expand_activation')(x)
    else:
        x = inputs

    # Depthwise Convolution
    if strides == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(x, kernel_size),
                                 name=name + 'dwconv_pad')(x)
        conv_pad = 'valid'
    else:
        conv_pad = 'same'
    x = layers.DepthwiseConv2D(kernel_size,
                               strides=strides,
                               padding=conv_pad,
                               use_bias=False,
                               depthwise_initializer=CONV_KERNEL_INITIALIZER,
                               name=name + 'dwconv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
    x = layers.Activation(activation_fn, name=name + 'activation')(x)

    # Squeeze and Excitation phase
    if 0 < se_ratio <= 1:
        filters_se = max(1, int(filters_in * se_ratio))
        se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
        if bn_axis == 1:
            se = layers.Reshape((filters, 1, 1), name=name + 'se_reshape')(se)
        else:
            se = layers.Reshape((1, 1, filters), name=name + 'se_reshape')(se)
        se = layers.Conv2D(filters_se, 1,
                           padding='same',
                           activation=activation_fn,
                           kernel_initializer=CONV_KERNEL_INITIALIZER,
                           name=name + 'se_reduce')(se)
        se = layers.Conv2D(filters, 1,
                           padding='same',
                           activation='sigmoid',
                           kernel_initializer=CONV_KERNEL_INITIALIZER,
                           name=name + 'se_expand')(se)
        if backend.backend() == 'theano':
            # For the Theano backend, we have to explicitly make
            # the excitation weights broadcastable.
            se = layers.Lambda(
                lambda x: backend.pattern_broadcast(x, [True, True, True, False]),
                output_shape=lambda input_shape: input_shape,
                name=name + 'se_broadcast')(se)
        x = layers.multiply([x, se], name=name + 'se_excite')

    # Output phase
    x = layers.Conv2D(filters_out, 1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name=name + 'project_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
    if id_skip and strides == 1 and filters_in == filters_out:
        if drop_rate > 0:
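            # noise_shape=(None, 1, 1, 1) shares the dropout mask across
            # height, width, and channels, so the entire residual branch is
            # dropped per example ("drop connect" / stochastic depth).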
            x = layers.Dropout(drop_rate,
                               noise_shape=(None, 1, 1, 1),
                               name=name + 'drop')(x)
        x = layers.add([x, inputs], name=name + 'add')

    return x


def EfficientNet(width_coefficient,
                 depth_coefficient,
                 drop_connect_rate=0.2,
                 depth_divisor=8,
                 activation_fn=swish,
                 blocks_args=DEFAULT_BLOCKS_ARGS,
                 model_name='efficientnet',
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 name=None):
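    """Instantiate a headless EfficientNet architecture.

    # Arguments
        width_coefficient: float, scaling coefficient for network width.
        depth_coefficient: float, scaling coefficient for network depth.
        drop_connect_rate: float between 0 and 1, dropout rate at skip connections.
        depth_divisor: integer, a unit of network width.
        activation_fn: activation function.
        blocks_args: list of dicts, parameters to construct block modules.
        model_name: string, model name.
        weights: optional path to a weights file to load, or None for random
            initialization (the original 'imagenet' download is disabled in
            this modified version).
        input_tensor: optional Keras tensor to use as image input for the model.
        input_shape: optional shape tuple of the model input.
        name: string, optional model name, overriding model_name.

    # Returns
        A Keras model instance.
    """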
    #pylint: disable=too-many-locals
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    def round_filters(filters, divisor=depth_divisor):
        """Round number of filters based on depth multiplier."""
        filters *= width_coefficient
        new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_filters < 0.9 * filters:
            new_filters += divisor
        return int(new_filters)

    def round_repeats(repeats):
        """Round number of repeats based on depth multiplier."""
        return int(math.ceil(depth_coefficient * repeats))

    # Build stem
    x = img_input
    x = layers.ZeroPadding2D(padding=correct_pad(x, 3),
                             name='stem_conv_pad')(x)
    x = layers.Conv2D(round_filters(32), 3,
                      strides=2,
                      padding='valid',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='stem_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
    x = layers.Activation(activation_fn, name='stem_activation')(x)

    # Build blocks
    blocks_args = deepcopy(blocks_args)

    b = 0
    blocks = float(sum(args['repeats'] for args in blocks_args))
    for (i, args) in enumerate(blocks_args):
        assert args['repeats'] > 0
        # Update block input and output filters based on depth multiplier.
        args['filters_in'] = round_filters(args['filters_in'])
        args['filters_out'] = round_filters(args['filters_out'])

        for j in range(round_repeats(args.pop('repeats'))):
            # The first block needs to take care of stride and filter size increase.
            if j > 0:
                args['strides'] = 1
                args['filters_in'] = args['filters_out']
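            # Drop rate grows linearly with block index, from 0 in the first
            # block up to (nearly) drop_connect_rate in the last.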
            x = block(x, activation_fn, drop_connect_rate * b / blocks,
                      name='block{}{}_'.format(i + 1, chr(j + 97)), **args)
            b += 1

    # Build top
    #x = layers.Conv2D(round_filters(1280), 1,
    #                  padding='same',
    #                  use_bias=False,
    #                  kernel_initializer=CONV_KERNEL_INITIALIZER,
    #                  name='top_conv')(x)
    #x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
    #x = layers.Activation(activation_fn, name='top_activation')(x)
    #if include_top:
    #    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    #    if dropout_rate > 0:
    #        x = layers.Dropout(dropout_rate, name='top_dropout')(x)
    #    x = layers.Dense(classes,
    #                     activation='softmax',
    #                     kernel_initializer=DENSE_KERNEL_INITIALIZER,
    #                     name='probs')(x)
    #else:
    #    if pooling == 'avg':
    #        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    #    elif pooling == 'max':
    #        x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=name if name is not None else model_name)

    # Load weights.
    #if weights == 'imagenet':
    #    if include_top:
    #        file_suff = '_weights_tf_dim_ordering_tf_kernels_autoaugment.h5'
    #        file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
    #    else:
    #        file_suff = '_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
    #        file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
    #    file_name = model_name + file_suff
    #    weights_path = keras_utils.get_file(file_name,
    #                                        BASE_WEIGHTS_PATH + file_name,
    #                                        cache_subdir='models',
    #                                        file_hash=file_hash)
    #    model.load_weights(weights_path, by_name=True, skip_mismatch=True)
    if weights is not None:
        model.load_weights(weights)

    return model


#def EfficientNetB0(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.0, 1.0,
#                        model_name='efficientnet-b0',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB1(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.0, 1.1,
#                        model_name='efficientnet-b1',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB2(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.1, 1.2,
#                        model_name='efficientnet-b2',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB3(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.2, 1.4,
#                        model_name='efficientnet-b3',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB4(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.4, 1.8,
#                        model_name='efficientnet-b4',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB5(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.6, 2.2,
#                        model_name='efficientnet-b5',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB6(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(1.8, 2.6,
#                        model_name='efficientnet-b6',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)
#
#
#def EfficientNetB7(include_top=True,
#                   input_tensor=None,
#                   input_shape=None,
#                   **kwargs):
#    return EfficientNet(2.0, 3.1,
#                        model_name='efficientnet-b7',
#                        include_top=include_top,
#                        input_tensor=input_tensor, input_shape=input_shape,
#                        **kwargs)

def DeltaEfficientNet(input_shape, width_coefficient=1.1, depth_coefficient=1.2, name=None):
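    """EfficientNet backbone for DELTA: no pretrained weights and no
    classification head. The default width and depth coefficients (1.1, 1.2)
    are those of EfficientNet-B2.
    """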
    return EfficientNet(width_coefficient, depth_coefficient,
                        input_shape=input_shape, weights=None, name=name)

#def preprocess_input(x, data_format=None, **kwargs):
#    """Preprocesses a numpy array encoding a batch of images.
#
#    # Arguments
#        x: a 3D or 4D numpy array consists of RGB values within [0, 255].
#        data_format: data format of the image tensor.
#
#    # Returns
#        Preprocessed array.
#    """
#    return imagenet_utils.preprocess_input(x, data_format,
#                                           mode='torch', **kwargs)


#setattr(EfficientNetB0, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB1, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB2, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB3, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB4, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB5, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB6, '__doc__', EfficientNet.__doc__)
#setattr(EfficientNetB7, '__doc__', EfficientNet.__doc__)

register_layer('EfficientNet', DeltaEfficientNet)

Functions

def DeltaEfficientNet(input_shape, width_coefficient=1.1, depth_coefficient=1.2, name=None)
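
EfficientNet backbone for DELTA: no pretrained weights and no classification head. The default width and depth coefficients (1.1, 1.2) are those of EfficientNet-B2.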
def EfficientNet(width_coefficient, depth_coefficient, drop_connect_rate=0.2, depth_divisor=8, activation_fn=swish, blocks_args=DEFAULT_BLOCKS_ARGS, model_name='efficientnet', weights='imagenet', input_tensor=None, input_shape=None, name=None)
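
Builds a headless EfficientNet from width and depth scaling coefficients (see the docstring in the source above). Since the B0-B7 convenience constructors are commented out, a variant can be recreated by passing its coefficients directly; a sketch using the B0 coefficients from the commented-out constructors (the input shape and name are illustrative):

from delta.extensions.layers.efficientnet import EfficientNet

# EfficientNet-B0 coefficients: width 1.0, depth 1.0; random weights.
model = EfficientNet(1.0, 1.0, input_shape=(224, 224, 3), weights=None,
                     name='efficientnet-b0')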
def block(inputs, activation_fn=swish, drop_rate=0.0, name='', filters_in=32, filters_out=16, kernel_size=3, strides=1, expand_ratio=1, se_ratio=0.0, id_skip=True)

A mobile inverted residual block.

Arguments

inputs: input tensor.
activation_fn: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.

Returns

output tensor for the block.
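
A sketch applying one block standalone (shapes illustrative; the input channel count must equal `filters_in`):

import tensorflow
from delta.extensions.layers.efficientnet import block

inp = tensorflow.keras.Input(shape=(64, 64, 32))
# With the defaults there is no expansion (expand_ratio=1), no
# squeeze-and-excitation (se_ratio=0), and no identity skip (since
# filters_in != filters_out): just a depthwise conv plus projection.
out = block(inp, name='demo_')   # -> shape (None, 64, 64, 16)
model = tensorflow.keras.Model(inp, out)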
def correct_pad(inputs, kernel_size)

Returns a tuple for zero-padding for 2D convolution with downsampling.

Arguments

inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.

Returns

A tuple.
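
For example, with a 224x224 channels-last input and `kernel_size=3`: `adjust = (1, 1)` (even dimensions) and `correct = (1, 1)`, giving `((0, 1), (0, 1))`, i.e. one row and one column of zeros on the bottom and right. A sketch:

import tensorflow
from delta.extensions.layers.efficientnet import correct_pad

x = tensorflow.keras.Input(shape=(224, 224, 3))
print(correct_pad(x, 3))   # ((0, 1), (0, 1))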
def swish(x)

Swish activation function.

Arguments

x: Input tensor.

Returns

The Swish activation: `x * sigmoid(x)`.

References

[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
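
A quick numeric check (values rounded):

import tensorflow
from delta.extensions.layers.efficientnet import swish

x = tensorflow.constant([-1.0, 0.0, 1.0])
print(swish(x).numpy())   # approx. [-0.2689, 0.0, 0.7311]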