@@ -49,17 +49,17 @@ biases = {
 
 # Building the encoder
 def encoder(x):
-    # Encoder Hidden layer with relu activation #1
+    # Encoder Hidden layer with sigmoid activation #1
     layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
-    # Decoder Hidden layer with relu activation #2
+    # Encoder Hidden layer with sigmoid activation #2
     layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
     return layer_2
 
 
 # Building the decoder
 def decoder(x):
-    # Encoder Hidden layer with relu activation #1
+    # Decoder Hidden layer with sigmoid activation #1
     layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
-    # Decoder Hidden layer with relu activation #2
+    # Decoder Hidden layer with sigmoid activation #2
     layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
     return layer_2
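
For context, a minimal sketch of how these two functions are typically wired together in the rest of the script, in the same TF1-style tf.nn / tf.train API used above. The X, num_input, and learning_rate names are assumptions about code outside this hunk, not part of this change:

import tensorflow as tf

# Assumed hyperparameters and input size (defined elsewhere in the script)
learning_rate = 0.01
num_input = 784  # e.g. flattened 28x28 MNIST images

# Assumed input placeholder
X = tf.placeholder("float", [None, num_input])

# Compose the encoder and decoder defined in this hunk
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)

# The reconstruction target is the input itself
y_pred = decoder_op
y_true = X

# Mean squared reconstruction error; the sigmoid output keeps y_pred in (0, 1),
# which pairs naturally with inputs normalized to that range
loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)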