
Keras Adam Code Analysis and an Adam Optimizer with EMA

Table of Contents

  • Keras Adam
  • Initialization
  • Update Function
  • Adam with EMA

For the theory behind Adam, see this earlier post:

The Choice of Optimization Algorithms

Keras Adam

from keras import backend as K
from keras.legacy import interfaces
from keras.optimizers import Optimizer


class Adam(Optimizer):
    """Adam optimizer.

    Default parameters follow those provided in the original paper.

    # Arguments
        learning_rate: float >= 0. Learning rate.
        beta_1: float, 0 < beta < 1. Generally close to 1.
        beta_2: float, 0 < beta < 1. Generally close to 1.
        amsgrad: boolean. Whether to apply the AMSGrad variant of this
            algorithm from the paper "On the Convergence of Adam and
            Beyond".

    # References
        - [Adam - A Method for Stochastic Optimization](
           https://arxiv.org/abs/1412.6980v8)
        - [On the Convergence of Adam and Beyond](
           https://openreview.net/forum?id=ryQu7f-RZ)
    """

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 amsgrad=False, **kwargs):
        self.initial_decay = kwargs.pop('decay', 0.0)
        self.epsilon = kwargs.pop('epsilon', K.epsilon())
        learning_rate = kwargs.pop('lr', learning_rate)
        super(Adam, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.learning_rate = K.variable(learning_rate, name='learning_rate')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(self.initial_decay, name='decay')
        self.amsgrad = amsgrad

    @interfaces.legacy_get_updates_support
    @K.symbolic
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)  # get the gradients
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.learning_rate
        # if the initial decay factor is non-zero, the learning rate keeps shrinking as the iteration count grows
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))

        t = K.cast(self.iterations, K.floatx()) + 1
        # correction factor that turns the biased moment estimates into unbiased ones;
        # the computation shared by all parameters is hoisted out of the loop below for speed
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))
        # initial values of the first-moment estimates
        ms = [K.zeros(K.int_shape(p),
              dtype=K.dtype(p),
              name='m_' + str(i))
              for (i, p) in enumerate(params)]
        # initial values of the second-moment estimates
        vs = [K.zeros(K.int_shape(p),
              dtype=K.dtype(p),
              name='v_' + str(i))
              for (i, p) in enumerate(params)]

        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p),
                     dtype=K.dtype(p),
                     name='vhat_' + str(i))
                     for (i, p) in enumerate(params)]
        else:
            vhats = [K.zeros(1, name='vhat_' + str(i))
                     for i in range(len(params))]
        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g  # first-moment estimate
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)  # second-moment estimate
            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)
                p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(K.update(vhat, vhat_t))
            else:
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)  # parameter update

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            # if the parameter has a constraint, apply it to the new value
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
    # return the current hyperparameters
    def get_config(self):
        config = {'learning_rate': float(K.get_value(self.learning_rate)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon,
                  'amsgrad': self.amsgrad}
        base_config = super(Adam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))      
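get_config exposes the current hyperparameter values so the optimizer can be serialized together with a model. A quick round-trip sketch, using only the Adam class above and the from_config classmethod inherited from the base Optimizer:

opt = Adam(learning_rate=1e-3, amsgrad=True)
config = opt.get_config()        # dict with learning_rate, beta_1, beta_2, decay, epsilon, amsgrad
opt2 = Adam.from_config(config)  # the base class rebuilds an equivalent optimizer via cls(**config)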

Initialization

The parent class Optimizer's constructor initializes self.updates = [] and self.weights = []. allowed_kwargs whitelists the gradient-clipping arguments clipnorm (clip by the global L2 norm of the gradients) and clipvalue (clip each gradient element to a fixed range); in practice these arguments are rarely passed. A sketch of how they act on the gradients follows the constructor below.

    def __init__(self, **kwargs):
        allowed_kwargs = {'clipnorm', 'clipvalue'}
        for k in kwargs:
            if k not in allowed_kwargs:
                raise TypeError('Unexpected keyword argument '
                                'passed to optimizer: ' + str(k))
        self.__dict__.update(kwargs)
        self.updates = []
        self.weights = []
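The constructor above only stores clipnorm / clipvalue; the clipping itself happens inside get_gradients, which get_updates calls first. A rough, self-contained sketch of that behaviour (not the verbatim Keras source; the name clipped_gradients is made up for illustration):

from keras import backend as K

def clipped_gradients(loss, params, clipnorm=None, clipvalue=None):
    """Sketch of how clipnorm/clipvalue affect the gradients returned by get_gradients()."""
    grads = K.gradients(loss, params)
    if clipnorm is not None and clipnorm > 0:
        # rescale so the global L2 norm of all gradients is at most clipnorm
        norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
        grads = [g * clipnorm / K.maximum(norm, clipnorm) for g in grads]
    if clipvalue is not None and clipvalue > 0:
        # clip every gradient element into [-clipvalue, clipvalue]
        grads = [K.clip(g, -clipvalue, clipvalue) for g in grads]
    return grads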

Adam's own constructor then initializes:

  • initial_decay: the initial learning-rate decay factor (popped from kwargs as decay)
  • epsilon: a small value close to 0 that avoids division by zero
  • learning_rate (the legacy lr keyword is still accepted)

Inside a name scope it creates the following backend variables:

  • iterations: the iteration counter
  • learning_rate: the learning rate
  • beta_1: exponential decay rate of the first-moment estimate
  • beta_2: exponential decay rate of the second-moment estimate
  • decay: the learning-rate decay factor

Finally, amsgrad (whether to use the AMSGrad variant of Adam) is stored as a plain attribute.

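A minimal usage sketch tying these hyperparameters back to the constructor (model is a placeholder for any Keras model; nothing here goes beyond the arguments listed above):

from keras.optimizers import Adam

# decay > 0 turns on the per-iteration learning-rate decay seen in get_updates;
# epsilon and decay are picked up from **kwargs in __init__.
opt = Adam(learning_rate=1e-3, beta_1=0.9, beta_2=0.999,
           epsilon=1e-7, decay=1e-6, amsgrad=False)
model.compile(optimizer=opt, loss='categorical_crossentropy')  # `model` is a placeholder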
Update Function

The update logic is annotated in the comments inside get_updates above; a plain NumPy version of a single step follows below.
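For reference, here is one Adam step written in plain NumPy, mirroring the formulation used in get_updates (the bias correction is folded into the step size lr_t). This is an illustrative sketch, not Keras code:

import numpy as np

def adam_step(p, g, m, v, t, lr=1e-3, beta_1=0.9, beta_2=0.999, eps=1e-7):
    """One Adam update for a parameter array p with gradient g at step t (t >= 1)."""
    m_t = beta_1 * m + (1. - beta_1) * g             # first-moment estimate
    v_t = beta_2 * v + (1. - beta_2) * np.square(g)  # second-moment estimate
    lr_t = lr * np.sqrt(1. - beta_2 ** t) / (1. - beta_1 ** t)  # bias-corrected step size
    p_t = p - lr_t * m_t / (np.sqrt(v_t) + eps)      # parameter update
    return p_t, m_t, v_t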

Adam with EMA

The @export_to_custom_objects decorator mainly gives the newly created optimizer class a name and registers it in Keras's custom objects, i.e. roughly keras.utils.get_custom_objects()[name] = NewOptimizer.

For the rest, see the comments in the code. One open question about the execution flow: during Keras training, how is the EMA weight-initialization code kept from running more than once, namely this line:

K.batch_set_value(zip(self.ema_weights, self.old_weights))

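Neither @export_to_custom_objects nor @insert_arguments is defined in the excerpt below. Based on the description above, a hypothetical minimal sketch of both helpers (together with the imports the excerpt itself relies on) might look like this; the actual implementations may differ:

import tensorflow as tf               # used by the control_dependencies block below
from keras import backend as K
from keras.utils import get_custom_objects

def export_to_custom_objects(fn):
    """Hypothetical sketch: register the optimizer class returned by `fn` in Keras's custom objects."""
    def wrapper(BaseOptimizer):
        NewOptimizer = fn(BaseOptimizer)
        name = 'EMA' + BaseOptimizer.__name__  # hypothetical naming rule
        NewOptimizer.__name__ = name
        get_custom_objects()[name] = NewOptimizer
        return NewOptimizer
    return wrapper

def insert_arguments(**defaults):
    """Hypothetical sketch: pop the given keyword arguments (e.g. ema_momentum) off kwargs
    and attach them to self before the wrapped __init__ runs."""
    def decorator(init):
        def new_init(self, *args, **kwargs):
            for k, v in defaults.items():
                setattr(self, k, kwargs.pop(k, v))
            init(self, *args, **kwargs)
        return new_init
    return decorator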
@export_to_custom_objects
def extend_with_exponential_moving_average(BaseOptimizer):
    """傳回新的優化器類,加入EMA(權重滑動平均)
    """
    class NewOptimizer(BaseOptimizer):
        """帶EMA(權重滑動平均)的優化器,EMA實際上就是權重,隻不過我們最後用
        """
        @insert_arguments(ema_momentum=0.999)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)

        def get_updates(self, loss, params):
            # calling the parent get_updates already updates the weights and the m/v accumulators
            updates = super(NewOptimizer, self).get_updates(loss, params)
            self.model_weights = params  # kept so we can apply/reset the EMA weights later
            self.ema_weights = [K.zeros(K.shape(w)) for w in params]  # EMA accumulators
            self.old_weights = K.batch_get_value(params)
            # note: a moving average should not be re-initialized like this; once the weights are
            # initialized, shouldn't later assignments only go through K.update?
            K.batch_set_value(zip(self.ema_weights, self.old_weights))

            ema_updates, ema_momentum = [], self.ema_momentum
            # control dependency: the EMA updates below run only after `updates` have executed,
            # i.e. after the params have already been updated
            with tf.control_dependencies(updates):
                for w1, w2 in zip(self.ema_weights, params):
                    new_w = ema_momentum * w1 + (1 - ema_momentum) * w2
                    ema_updates.append(K.update(w1, new_w))

            return ema_updates

        def get_config(self):
            config = {'ema_momentum': self.ema_momentum,
                      }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

        def apply_ema_weights(self):
            """備份原模型權重,然後将平均權重應用到模型上去。
            """
            self.old_weights = K.batch_get_value(self.model_weights)
            ema_weights = K.batch_get_value(self.ema_weights)
            K.batch_set_value(zip(self.model_weights, ema_weights))

        def reset_old_weights(self):
            """恢複模型到舊權重。
            """
            K.batch_set_value(zip(self.model_weights, self.old_weights))

    return NewOptimizer
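A minimal usage sketch, assuming the Keras Adam class analyzed earlier is importable, that @insert_arguments exposes ema_momentum as a constructor keyword, and that model, x_train and y_train are placeholders for your own model and data:

from keras.optimizers import Adam

AdamEMA = extend_with_exponential_moving_average(Adam)  # build the EMA-enabled optimizer class
model.compile(optimizer=AdamEMA(learning_rate=1e-3, ema_momentum=0.999),
              loss='categorical_crossentropy')
model.fit(x_train, y_train, epochs=10)

opt = model.optimizer
opt.apply_ema_weights()              # back up current weights and load the EMA weights
model.save_weights('model_ema.h5')   # evaluate / save with the averaged weights
opt.reset_old_weights()              # restore the original weights to continue training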