JH721 SW Autonomous Vehicle [DeepLearning, CNN] // Week 9-3

JH · June 9, 2021

Autonomous Vehicle SW Development


softmax

import tensorflow as tf
import numpy as np
tf.get_logger().setLevel('ERROR')

xy = np.loadtxt('../../softmax.csv', delimiter=',', unpack=True, dtype='float32')
print(xy)
print()
x_data = np.transpose(xy[:3])
y_data = np.transpose(xy[3:])
print('x_data :', x_data.shape)
print('y_data :', y_data.shape)

# weight matrix: 3 input features -> 3 classes
W = tf.Variable(tf.zeros([3,3]))

# hypothesis: softmax probabilities over the linear scores X @ W
hx = lambda X: tf.nn.softmax(tf.matmul(X, W))

# cross-entropy cost: mean over the batch of -sum(Y * log(hx(x_data)))
cost = lambda Y: tf.reduce_mean(-tf.reduce_sum(Y * tf.math.log(hx(x_data)), 1))
rate = tf.Variable(.01)

opt = tf.optimizers.SGD(rate)
for step in range(2001):
    with tf.GradientTape() as tape:
        loss = cost(y_data)
    grads = tape.gradient(loss, [W])
    opt.apply_gradients(zip(grads, [W]))
    
    if step % 200 == 0:
        print(step, grads[0].numpy(), '\nW =', W.numpy())

print('-'*60)
a = hx([[1., 11., 7.]]).numpy()
b = hx([[1., 3., 4.]]).numpy()
c = hx([[1., 1., 0.]]).numpy()
abc = hx([[1., 11., 7.], [1., 3., 4.], [1., 1., 0.]]).numpy()

print('[1, 11, 7] :', a, tf.argmax(a, 1).numpy())
print('[1, 3, 4] :', b, tf.argmax(b, 1).numpy())
print('[1, 1, 0] :', c, tf.argmax(c, 1).numpy())
print('[1, 11, 7], [1, 3, 4], [1, 1, 0] :\n', abc, tf.argmax(abc, 1).numpy())
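
For reference, here is a minimal NumPy sketch of what tf.nn.softmax and the cross-entropy cost above compute; the logits and one-hot label below are made-up example values, not rows from softmax.csv.

import numpy as np

def softmax(z):
    # subtract the row-wise max for numerical stability
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def cross_entropy(probs, onehot):
    # mean over the batch of -sum(y * log(p)), matching the cost lambda above
    return np.mean(-np.sum(onehot * np.log(probs), axis=1))

logits = np.array([[2.0, 1.0, 0.1]])   # example class scores
onehot = np.array([[1.0, 0.0, 0.0]])   # example one-hot label
p = softmax(logits)
print(p, p.sum())                      # probabilities summing to 1
print(cross_entropy(p, onehot))        # scalar loss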

XOR

import tensorflow as tf
import numpy as np


xy = np.loadtxt('../../xor.csv', delimiter=',', unpack=True, dtype='float32')
print(xy)
print()
x_data = np.transpose(xy[:-1])
y_data = np.reshape(xy[-1], (4,1))
dataset = tf.data.Dataset.from_tensor_slices((x_data, y_data)).batch(len(x_data))

def preprocess_data(features, labels):
    features = tf.cast(features, tf.float32)
    labels = tf.cast(labels, tf.float32)
    return features, labels

W1 = tf.Variable(tf.random.uniform((2, 2)), name='weight1')
W2 = tf.Variable(tf.random.uniform((2, 1)), name='weight2')

b1 = tf.Variable(tf.random.uniform((2,)), name='bias1')
b2 = tf.Variable(tf.random.uniform((1,)), name='bias2')

def neural_net(features):
    layer1 = tf.sigmoid(tf.matmul(features, W1) + b1)
    hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)
    return hypothesis

def loss_fn(hypothesis, labels):
    cost = -tf.reduce_mean(labels * tf.math.log(hypothesis) + (1 - labels) * tf.math.log(1 - hypothesis))
    return cost             

optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)                 

def accuracy_fn(hypothesis, labels):
    predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, labels), dtype=tf.float32))
    return accuracy
def grad(features, labels):
    # compute the hypothesis inside the tape so its ops are recorded
    with tf.GradientTape() as tape:
        loss_value = loss_fn(neural_net(features), labels)
    return tape.gradient(loss_value, [W1, W2, b1, b2])
                 
EPOCHS = 5000


for step in range(EPOCHS):
    for features, labels in dataset:
        features, labels = preprocess_data(features, labels)
        grads = grad(features, labels)
        optimizer.apply_gradients(zip(grads, [W1, W2, b1, b2]))

        if step % 500 == 0:
            print(step, loss_fn(neural_net(features), labels).numpy())

# TEST
correct_prediction = tf.cast(neural_net(features) > 0.5, dtype=tf.float32)
print('accuracy :', accuracy_fn(neural_net(features), labels).numpy())
print('prediction :\n', correct_prediction.numpy())
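
The same 2-2-1 sigmoid network can also be written with tf.keras. The sketch below is a minimal equivalent, assuming x_data, y_data, and EPOCHS from above; the layer sizes, sigmoid activations, Nadam learning rate, and binary cross-entropy mirror the manual version, while verbose=0 and the rounded print are just for illustration.

model = tf.keras.Sequential([
    tf.keras.layers.Dense(2, activation='sigmoid', input_shape=(2,)),  # hidden layer (W1, b1)
    tf.keras.layers.Dense(1, activation='sigmoid'),                    # output layer (W2, b2)
])
model.compile(optimizer=tf.keras.optimizers.Nadam(learning_rate=0.01),
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(x_data, y_data, epochs=EPOCHS, verbose=0)
print(model.predict(x_data).round())   # ideally recovers the XOR truth table (0, 1, 1, 0)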

XOR using ReLU, softmax (tf1)

import tensorflow as tf
import tensorflow.compat.v1 as tf1
import numpy as np

tf1.disable_eager_execution()  # tf1 placeholders below require graph mode
sess = tf1.InteractiveSession()

x_ = [[0,0], [0,1], [1,0], [1,1]]
expect = [[1,0], [0,1], [0,1], [1,0]]  # one-hot encoded XOR targets
x = tf1.placeholder("float", [None, 2])
y_ = tf1.placeholder("float", [None, 2])

num_hidden = 20
W = tf.Variable(tf.random.uniform([2, num_hidden], -.01, .01))
b = tf.Variable(tf.random.uniform([num_hidden], -.01, .01))
hidden = tf.nn.relu(tf.matmul(x, W) + b)

W2 = tf.Variable(tf.random.uniform([num_hidden, 2], -.01, .01))
b2 = tf.Variable(tf.random.uniform([2], -.01, .01))

y = tf.nn.softmax(tf.matmul(hidden, W2) + b2)

cross_entropy = -tf.reduce_sum(y_ * tf.math.log(y))
train_step = tf1.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)
tf1.global_variables_initializer().run()

for step in range(1000):
    feed = {x:x_, y_:expect}
    e, a = sess.run([cross_entropy, train_step], feed)
    if e < 1: break
    print(f"{step} : {e : .4f}")
print('-'*60)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
print(f"Accuracy : {accuracy.eval(feed)*100:.2f}%" )
learned_output=tf.argmax(y,1)
print(learned_output.eval({x: x_}))

sess.close()
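
For comparison, here is a minimal TF2/Keras sketch of the same ReLU + softmax classifier with one-hot targets; run it in a fresh Python process, since the block above disables eager execution. The hidden width of 20 and the targets mirror the tf1 code, while the Adam optimizer and the epoch count are assumptions swapped in for the plain gradient-descent step used above.

import numpy as np
import tensorflow as tf

x_ = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]], dtype=np.float32)
expect = np.array([[1., 0.], [0., 1.], [0., 1.], [1., 0.]], dtype=np.float32)  # one-hot XOR targets

model = tf.keras.Sequential([
    tf.keras.layers.Dense(20, activation='relu', input_shape=(2,)),
    tf.keras.layers.Dense(2, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_, expect, epochs=1000, verbose=0)
print(tf.argmax(model.predict(x_), 1).numpy())   # expected: [0 1 1 0]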