A denoising autoencoder based on Keras

Overview

This post mainly introduces the implementation of a denoising autoencoder in Keras. A plain autoencoder, simply put, learns a latent space that represents the input vector and reconstructs it at the output, so the input and output have the same dimension. An autoencoder can also be made to classify, mainly by putting a different activation function on the last layer of the network. The stacked autoencoder is an upgraded version of the denoising autoencoder (DA): it adds multiple hidden layers in the latent space, where each layer takes the output of the layer in front of it as input and encodes it further, layer by layer, until the last one. A simple schematic is shown below.


[Figure: schematic of a stacked autoencoder]
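To make the stacking idea concrete, here is a minimal sketch of such a network in Keras (this is not the code from this post; the layer sizes 784/256/64 are arbitrary assumptions for illustration):

from keras.layers import Input, Dense
from keras.models import Model

# Minimal stacked autoencoder sketch: each encoding layer feeds the next,
# 784 -> 256 -> 64, then mirrored back out to the reconstruction.
inputs = Input(shape=(784,))
h1 = Dense(256, activation='relu')(inputs)       # first encoding layer
code = Dense(64, activation='relu')(h1)          # deepest latent code
h2 = Dense(256, activation='relu')(code)         # first decoding layer
outputs = Dense(784, activation='sigmoid')(h2)   # reconstruction

stacked_ae = Model(inputs=inputs, outputs=outputs)
stacked_ae.compile(optimizer='adam', loss='mse')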

Here is a simple implementation:

# -*- coding: utf-8 -*-
"""
Created on Mon Dec 18 14:49:54 2017

@author: Administrator
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from keras.layers import Dense, Input
from keras.models import Model
from keras import backend as K
import numpy as np
import scipy.io as scio


# Load the data matrix from a .mat file; 'Endmatrix' is the variable
# stored inside endUse.mat
dataFile = 'endUse.mat'
datadic = scio.loadmat(dataFile)
data = datadic['Endmatrix']
data = data.T  # one sample per row

# 80/20 train/test split
totalnumber = np.size(data, 0)
trainsplit = int(0.8 * totalnumber)
data_train = data[:trainsplit]
data_test = data[trainsplit:]

# Corrupt the inputs with additive Gaussian noise (mean 0, std 0.4);
# the clean data remain the reconstruction targets
noise = np.random.normal(loc=0, scale=0.4, size=data_train.shape)
data_train_noisy = data_train + noise
noise = np.random.normal(loc=0, scale=0.4, size=data_test.shape)
data_test_noisy = data_test + noise

def contractive_autoencoder(data_train_noisy, data_train, data_test_noisy, data_test, lam=0.1):
    M, N = data_train_noisy.shape
    N_hidden = 2000
    N_batch = 128

    # Encoder with a single hidden layer. Sigmoid is used here because the
    # contractive penalty below relies on h * (1 - h), the sigmoid derivative.
    inputs = Input(shape=(N,))
    encoded = Dense(N_hidden, activation='sigmoid', name='encoded')(inputs)
    outputs = Dense(N, activation='linear')(encoded)

    model = Model(inputs=inputs, outputs=outputs)

    def contractive_loss(y_true, y_pred):
        mse = K.mean(K.square(y_true - y_pred), axis=1)

        # Reference the layer's kernel tensor directly so the penalty tracks
        # the weights as they are updated during training
        W = model.get_layer('encoded').kernel  # N x N_hidden
        W = K.transpose(W)                     # N_hidden x N
        h = model.get_layer('encoded').output
        dh = h * (1 - h)                       # N_batch x N_hidden

        # Squared Frobenius norm of the Jacobian of the hidden activations
        # w.r.t. the inputs: N_batch x N_hidden -> N_batch
        contractive = lam * K.sum(dh ** 2 * K.sum(W ** 2, axis=1), axis=1)

        return mse + contractive

    model.compile(optimizer='adam', loss=contractive_loss)
    model.fit(data_train_noisy, data_train,
              validation_data=(data_test_noisy, data_test),
              epochs=100,
              batch_size=N_batch)

    return model, Model(inputs=inputs, outputs=encoded)


model, representation = contractive_autoencoder(data_train_noisy, data_train,
                                                data_test_noisy, data_test)
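After training, the returned model maps noisy inputs to denoised reconstructions, and representation exposes the latent codes. A small usage sketch (the error comparison below is an assumption about how one might evaluate the result, not part of the original code):

data_test_denoised = model.predict(data_test_noisy)   # denoised reconstructions
codes = representation.predict(data_test_noisy)       # latent codes (N_hidden-dim)

# Compare reconstruction error against the clean test data
print('noisy    MSE:', np.mean((data_test_noisy - data_test) ** 2))
print('denoised MSE:', np.mean((data_test_denoised - data_test) ** 2))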

In this case, MSE is used as the reconstruction loss, with the contractive penalty added on top. The contractive_loss function above shows how a custom loss is written in Keras: a function of (y_true, y_pred) that returns a per-sample loss tensor, passed directly to model.compile.
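For comparison, if the contractive term is dropped, the same network becomes a plain denoising autoencoder and can be compiled with the built-in 'mse' loss; a minimal sketch under the same data setup:

# Same architecture, built-in MSE loss, no contractive penalty
N = data_train.shape[1]
dae_in = Input(shape=(N,))
dae_hidden = Dense(2000, activation='relu')(dae_in)
dae_out = Dense(N, activation='linear')(dae_hidden)

dae = Model(inputs=dae_in, outputs=dae_out)
dae.compile(optimizer='adam', loss='mse')
dae.fit(data_train_noisy, data_train,
        validation_data=(data_test_noisy, data_test),
        epochs=100, batch_size=128)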
