Artificial Neuron

Austin Jiuk Kim · March 23, 2022

Deep Learning series · 4/10

Code.1-3-1: Activation Layers

# An Activation layer applies only the activation function (no affine transform)

import tensorflow as tf

from tensorflow.math import exp, maximum
from tensorflow.keras.layers import Activation  # Activation is a kind of Layer

x = tf.random.normal(shape=(1, 5)) # input setting

# implement the activation functions as layers
sigmoid = Activation('sigmoid')
tanh = Activation('tanh')
relu = Activation('relu')

# forward propagation(TensorFlow)
y_sigmoid_tf = sigmoid(x)
y_tanh_tf = tanh(x)
y_relu_tf = relu(x)

# forward propagation(manual)
y_sigmoid_man = 1 / (1 + exp(-x))
y_tanh_man = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
y_relu_man = maximum(x, 0)


print(f'x: {x.shape}\n{x.numpy()}')
print()
print(f'Sigmoid(TensorFlow): {y_sigmoid_tf.shape}\n{y_sigmoid_tf.numpy()}')
print(f'Sigmoid(manual): {y_sigmoid_man.shape}\n{y_sigmoid_man.numpy()}')
print()
print(f'Tanh(TensorFlow): {y_tanh_tf.shape}\n{y_tanh_tf.numpy()}')
print(f'Tanh(manual): {y_tanh_man.shape}\n{y_tanh_man.numpy()}')
print()
print(f'ReLU(TensorFlow): {y_relu_tf.shape}\n{y_relu_tf.numpy()}')
print(f'ReLU(manual): {y_relu_man.shape}\n{y_relu_man.numpy()}')
x: (1, 5)
[[ 0.4140593  -1.0886137  -1.9466102   1.297912   -0.11210307]]

Sigmoid(TensorFlow): (1, 5)
[[0.6020608  0.2518794  0.12492344 0.78548336 0.47200355]]
Sigmoid(manual): (1, 5)
[[0.60206085 0.25187942 0.12492345 0.78548336 0.47200358]]

Tanh(TensorFlow): (1, 5)
[[ 0.39191395 -0.79637164 -0.96005476  0.8611846  -0.1116358 ]]
Tanh(manual): (1, 5)
[[ 0.39191392 -0.7963717  -0.96005493  0.8611847  -0.1116358 ]]

ReLU(TensorFlow): (1, 5)
[[0.4140593 0.        0.        1.297912  0.       ]]
ReLU(manual): (1, 5)
[[0.4140593 0.        0.        1.297912  0.       ]]
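
The TensorFlow results and the manual implementations agree up to float32 rounding. As a quick sanity check, here is a minimal sketch (not part of the original code, and assuming the tensors above are still in scope) that compares the two numerically:

# sanity check: TensorFlow vs. manual activations
import numpy as np

for name, y_tf, y_man in [('sigmoid', y_sigmoid_tf, y_sigmoid_man),
                          ('tanh', y_tanh_tf, y_tanh_man),
                          ('relu', y_relu_tf, y_relu_man)]:
    # a small tolerance covers the float32 rounding differences seen in the printout
    print(name, np.allclose(y_tf.numpy(), y_man.numpy(), atol=1e-6))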

Code.1-3-2: Activation in Dense Layer

# A Dense layer applies an affine transform followed by the activation

import tensorflow as tf
from tensorflow.math import exp
from tensorflow.keras.layers import Dense   # Dense is a kind of Layer

x = tf.random.normal(shape=(1, 5)) # input setting

# implement an artificial neuron:
# a single-unit Dense layer combined with its activation function
dense_sigmoid = Dense(units=1, activation='sigmoid')
dense_tanh = Dense(units=1, activation='tanh')
dense_relu = Dense(units=1, activation='relu')

# forward propagation (TensorFlow)
y_sigmoid = dense_sigmoid(x)
y_tanh = dense_tanh(x)
y_relu = dense_relu(x)

print(f'x: {x.shape}\n{x.numpy()}')
print()
print(f'AN with Sigmoid: {y_sigmoid.shape}\n{y_sigmoid.numpy()}')
print(f'AN with Tanh: {y_tanh.shape}\n{y_tanh.numpy()}')
print(f'AN with ReLU: {y_relu.shape}\n{y_relu.numpy()}')

print()
print('======')
print()

# forward propagation (manual)
W, B = dense_sigmoid.get_weights()
z = tf.linalg.matmul(x, W) + B
a = 1 / (1 + exp(-z))

print(f'Activation value(TensorFlow): {y_sigmoid.shape}\n{y_sigmoid.numpy()}')
print(f'Activation value(manual): {a.shape}\n{a.numpy()}')
x: (1, 5)
[[ 1.1621749 -0.7989249  2.0781152 -0.9444862  0.6221622]]

AN with Sigmoid: (1, 1)
[[0.7378292]]
AN with Tanh: (1, 1)
[[-0.7343226]]
AN with ReLU: (1, 1)
[[0.]]

======

Activation value(TensorFlow): (1, 1)
[[0.7378292]]
Activation value(manual): (1, 1)
[[0.73782927]]
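
The manual check above only covers the sigmoid neuron. The same verification can be extended to the tanh and ReLU neurons; a minimal sketch, assuming dense_tanh and dense_relu from the code above are still in scope (maximum comes from tensorflow.math, as in Code.1-3-1):

from tensorflow.math import maximum

# tanh neuron: affine transform, then tanh
W_t, B_t = dense_tanh.get_weights()
z_t = tf.linalg.matmul(x, W_t) + B_t
a_t = (exp(z_t) - exp(-z_t)) / (exp(z_t) + exp(-z_t))

# ReLU neuron: affine transform, then max(z, 0)
W_r, B_r = dense_relu.get_weights()
z_r = tf.linalg.matmul(x, W_r) + B_r
a_r = maximum(z_r, 0)

print(f'Tanh(manual): {a_t.shape}\n{a_t.numpy()}')   # should match y_tanh above
print(f'ReLU(manual): {a_r.shape}\n{a_r.numpy()}')   # should match y_relu above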