import tensorflow as tf
import numpy as np
The basic operations below are operator-overloaded via special methods, so you can simply use the operator symbols directly!
a = tf.range(6, dtype=tf.int32)
b = 2 * tf.ones(6, dtype=tf.int32)
tf.add(a, b)
<tf.Tensor: shape=(6,), dtype=int32, numpy=array([2, 3, 4, 5, 6, 7])>
a / b
<tf.Tensor: shape=(6,), dtype=float64, numpy=array([0. , 0.5, 1. , 1.5, 2. , 2.5])>
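For reference, a quick sketch (reusing the same a and b) showing that each operator symbol dispatches to the matching tf op; the True results in the comments follow from these deterministic inputs.
print(tf.reduce_all(a - b == tf.subtract(a, b)))        # tf.Tensor(True, shape=(), dtype=bool)
print(tf.reduce_all(a * b == tf.multiply(a, b)))        # tf.Tensor(True, shape=(), dtype=bool)
print(tf.reduce_all(a // b == tf.math.floordiv(a, b)))  # tf.Tensor(True, shape=(), dtype=bool)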
Various operations
a, b
(<tf.Tensor: shape=(6,), dtype=int32, numpy=array([0, 1, 2, 3, 4, 5])>,
<tf.Tensor: shape=(6,), dtype=int32, numpy=array([2, 2, 2, 2, 2, 2])>)
tf.minimum(a, b)
<tf.Tensor: shape=(6,), dtype=int32, numpy=array([0, 1, 2, 2, 2, 2])>
tf.sqrt(tf.cast(a, tf.float32))
<tf.Tensor: shape=(6,), dtype=float32, numpy=
array([0. , 0.99999994, 1.4142134 , 1.7320508 , 2. ,
2.236068 ], dtype=float32)>
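A few more elementwise ops follow the same pattern; a small sketch with the same a and b (the commented values follow from these deterministic inputs).
tf.maximum(a, b)                # [2, 2, 2, 3, 4, 5]
tf.abs(a - b)                   # [2, 1, 0, 1, 2, 3]
tf.exp(tf.cast(a, tf.float32))  # [1., 2.7182817, 7.389056, ...]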
rank_2 = tf.random.normal((3, 3))
rank_2
<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[ 0.20503631, -0.08427859, 1.4715824 ],
[-0.19386227, 2.079227 , -0.97439444],
[-0.9562997 , -0.05859427, -1.1111549 ]], dtype=float32)>
rank_2[0, 2]
<tf.Tensor: shape=(), dtype=float32, numpy=1.4715824>
rank_2[:, 1]
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.08427859, 2.079227 , -0.05859427], dtype=float32)>
rank_3 = tf.random.normal((3, 3, 3))
rank_3
<tf.Tensor: shape=(3, 3, 3), dtype=float32, numpy=
array([[[-1.1816031 , 1.7547415 , 0.66091967],
[ 0.27051273, -1.6336673 , -0.6169637 ],
[-0.8609951 , -1.3612835 , -0.45804065]],
[[-0.1852628 , 1.0304868 , -1.1574664 ],
[ 0.47465524, -0.36961123, -0.13274199],
[ 0.24387467, 1.5095061 , 2.053255 ]],
[[ 0.05444736, 1.928631 , 0.9452522 ],
[-0.5561081 , -0.5834151 , -0.6043173 ],
[ 0.7617505 , -0.00767427, -1.5076568 ]]], dtype=float32)>
rank_3[1, 1, 2]
<tf.Tensor: shape=(), dtype=float32, numpy=-0.13274199>
rank_4 = tf.random.normal((3, 3, 3, 3))
rank_4
<tf.Tensor: shape=(3, 3, 3, 3), dtype=float32, numpy=
array([[[[ 0.50706774, 0.499987 , 1.7789496 ],
[ 0.3312048 , -0.6388882 , 1.4398739 ],
[-1.4943681 , 1.8661255 , 1.7865485 ]],
[[-0.548494 , -0.5122591 , -0.3703277 ],
[-2.031433 , -0.37000778, -0.3568647 ],
[-0.49226046, 0.5954219 , -0.27069664]],
[[-0.06709263, 1.492129 , -0.49123618],
[ 1.0171849 , 0.11365218, -1.5203798 ],
[ 1.1505216 , 1.5809278 , 1.280719 ]]],
[[[ 0.24090762, 0.31654504, 0.7547957 ],
[ 0.6891229 , -0.6157129 , 0.9864865 ],
[-0.29514185, 0.6387484 , 0.04963104]],
[[-0.7597706 , -0.22093774, -1.8093712 ],
[ 0.6058919 , -0.7206745 , -1.3585502 ],
[ 0.7494383 , 2.9992824 , -1.0932549 ]],
[[ 0.71013945, -0.7078991 , 2.5863402 ],
[-0.5615792 , -1.789575 , -0.0407865 ],
[-0.15379922, -0.45686248, -1.1889204 ]]],
[[[-0.24976186, 0.37862897, -0.24878731],
[ 0.9135386 , 0.40640938, 0.42519498],
[ 0.13572034, -0.78109914, 0.20643151]],
[[ 2.2774432 , 0.14634542, -0.8373875 ],
[-0.6189941 , -0.2676697 , -0.19102365],
[-0.5517868 , -0.5336576 , -0.385302 ]],
[[ 0.14415428, 1.1691757 , 0.50464725],
[ 1.3920383 , -1.9812403 , -0.7602143 ],
[ 1.1363733 , 0.14947249, -0.17976034]]]], dtype=float32)>
rank_4[0, 1, 2, 2]
<tf.Tensor: shape=(), dtype=float32, numpy=-0.27069664>
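Slicing notation also works on higher-rank tensors; a quick sketch reusing rank_4 from above (the values are random, so only the shapes are shown).
rank_4[0].shape            # TensorShape([3, 3, 3]) -- an integer index drops that axis
rank_4[..., 0].shape       # TensorShape([3, 3, 3]) -- Ellipsis fills in the remaining axes
rank_4[0, 1:, :, 0].shape  # TensorShape([2, 3])    -- ranges keep an axis, integers drop it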
a
<tf.Tensor: shape=(6,), dtype=int32, numpy=array([0, 1, 2, 3, 4, 5])>
tf.reduce_sum(a, axis=0)
<tf.Tensor: shape=(), dtype=int32, numpy=15>
tf.reduce_sum(a, axis=0, keepdims=True)
<tf.Tensor: shape=(1,), dtype=int32, numpy=array([15])>
b = tf.random.normal((2, 7))
b
<tf.Tensor: shape=(2, 7), dtype=float32, numpy=
array([[ 0.7363715 , 0.70459914, 0.72874826, -1.2354152 , 1.9600862 ,
0.2008639 , 1.3937117 ],
[-2.0779068 , -1.1812352 , -0.3841972 , -0.88684154, 1.632099 ,
-0.23234195, -1.4421362 ]], dtype=float32)>
tf.reduce_mean(b, axis=1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([ 0.6412808 , -0.65322286], dtype=float32)>
tf.reduce_mean(b, axis=0)
<tf.Tensor: shape=(7,), dtype=float32, numpy=
array([-0.67076766, -0.23831803, 0.17227553, -1.0611284 , 1.7960926 ,
-0.01573902, -0.02421224], dtype=float32)>
c = tf.random.normal((2, 4, 3))
c
<tf.Tensor: shape=(2, 4, 3), dtype=float32, numpy=
array([[[-2.0622218 , -0.969671 , -0.2998341 ],
[-0.04462142, -0.10837448, 0.578238 ],
[-0.374006 , -0.9593053 , -0.3088316 ],
[-0.8554225 , -1.6576148 , 0.68067485]],
[[ 0.43476957, 0.31935734, 1.8985823 ],
[-1.9004401 , -0.32145387, 1.7793106 ],
[-0.73562187, 0.38154897, -0.27515203],
[ 1.8222455 , 0.5732691 , 0.4024673 ]]], dtype=float32)>
tf.reduce_mean(c, axis=0)
<tf.Tensor: shape=(4, 3), dtype=float32, numpy=
array([[-0.81372607, -0.32515684, 0.7993741 ],
[-0.9725308 , -0.21491417, 1.1787744 ],
[-0.5548139 , -0.28887814, -0.29199183],
[ 0.4834115 , -0.54217285, 0.5415711 ]], dtype=float32)>
tf.reduce_mean(c, axis=1)
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[-0.83406794, -0.92374134, 0.16256179],
[-0.09476176, 0.2381804 , 0.95130205]], dtype=float32)>
tf.reduce_mean(c, axis=2)
<tf.Tensor: shape=(2, 4), dtype=float32, numpy=
array([[-1.1105756 , 0.14174737, -0.547381 , -0.61078745],
[ 0.8842364 , -0.14752781, -0.20974164, 0.9326606 ]],
dtype=float32)>
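The reduce ops also accept a list of axes, and keepdims keeps the reduced axis with size 1 so the result still broadcasts against the original; a short sketch reusing c (shapes only, since the values are random).
tf.reduce_mean(c, axis=[1, 2]).shape            # TensorShape([2])
tf.reduce_mean(c, axis=1, keepdims=True).shape  # TensorShape([2, 1, 3])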
a = tf.constant([[2, 0], [0, 1]], dtype=tf.float32)
b = tf.constant([[1, 0], [1, 1]], dtype=tf.float32)
tf.matmul(a, b)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[2., 0.],
[1., 1.]], dtype=float32)>
tf.linalg.inv(a)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[0.5, 0. ],
[0. , 1. ]], dtype=float32)>
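As a quick sanity check (a sketch with the same a and b as above), multiplying a by its inverse recovers the identity, and tf.linalg.solve gives the same answer as multiplying by the inverse.
tf.matmul(tf.linalg.inv(a), a)  # [[1., 0.], [0., 1.]]
tf.linalg.solve(a, b)           # [[0.5, 0.], [1., 1.]], i.e. inv(a) @ b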
When using the shape-manipulation operations below, you need a good understanding of the axes involved.
a = tf.range(6, dtype=tf.int32) # [0, 1, 2, 3, 4, 5]
print('a : ', a, '\n')
a_2d = tf.reshape(a, (2, 3)) # reshape the 1-D vector into a 2x3 matrix
print('a_2d : ', a_2d, '\n')
a_2d_t = tf.transpose(a_2d) # transpose the 2x3 matrix into a 3x2 matrix
print('a_2d_t : ', a_2d_t, '\n')
a_3d = tf.expand_dims(a_2d, 0) # expand the 2x3 matrix into a 1x2x3 3-D tensor
print('a_3d : ', a_3d, '\n')
a_4d = tf.expand_dims(a_3d, 3) # expand the 1x2x3 tensor into a 1x2x3x1 4-D tensor
print('a_4d : ', a_4d, '\n')
a_1d = tf.squeeze(a_4d) # squeeze removes all size-1 axes, giving back a 2x3 tensor (not a 1-D vector, despite the name)
print('a_1d : ', a_1d, '\n')
a : tf.Tensor([0 1 2 3 4 5], shape=(6,), dtype=int32)
a_2d : tf.Tensor(
[[0 1 2]
[3 4 5]], shape=(2, 3), dtype=int32)
a_2d_t : tf.Tensor(
[[0 3]
[1 4]
[2 5]], shape=(3, 2), dtype=int32)
a_3d : tf.Tensor(
[[[0 1 2]
[3 4 5]]], shape=(1, 2, 3), dtype=int32)
a_4d : tf.Tensor(
[[[[0]
[1]
[2]]
[[3]
[4]
[5]]]], shape=(1, 2, 3, 1), dtype=int32)
a_1d : tf.Tensor(
[[0 1 2]
[3 4 5]], shape=(2, 3), dtype=int32)
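tf.squeeze with no axis removes every size-1 axis at once; passing an explicit axis is safer when you only want to drop a specific one. A small sketch reusing a_4d (shape (1, 2, 3, 1)).
tf.squeeze(a_4d, axis=3).shape  # TensorShape([1, 2, 3]) -- drop only the trailing size-1 axis
tf.squeeze(a_4d, axis=0).shape  # TensorShape([2, 3, 1]) -- drop only the leading size-1 axis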
a = tf.reshape(tf.range(12), (3, 4))
a
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])>
tf.slice(a, [0, 1], [2, 3]) # starting at position [0, 1], extract a slice of size [2, 3]
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[5, 6, 7]])>
a1, a2 = tf.split(a, num_or_size_splits=2, axis=1)
print(a1)
print(a2)
tf.Tensor(
[[0 1]
[4 5]
[8 9]], shape=(3, 2), dtype=int32)
tf.Tensor(
[[ 2 3]
[ 6 7]
[10 11]], shape=(3, 2), dtype=int32)
tf.concat([a1, a2], 1)
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])>
tf.concat([a1, a2], 0)
<tf.Tensor: shape=(6, 2), dtype=int32, numpy=
array([[ 0, 1],
[ 4, 5],
[ 8, 9],
[ 2, 3],
[ 6, 7],
[10, 11]])>
tf.tile(a1, [1, 3])
<tf.Tensor: shape=(3, 6), dtype=int32, numpy=
array([[0, 1, 0, 1, 0, 1],
[4, 5, 4, 5, 4, 5],
[8, 9, 8, 9, 8, 9]])>
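tf.tile can repeat along any combination of axes; a quick sketch reusing a1 (shape (3, 2)), shapes only.
tf.tile(a1, [2, 1]).shape  # TensorShape([6, 2]) -- repeat twice along axis 0
tf.tile(a1, [2, 3]).shape  # TensorShape([6, 6]) -- repeat along both axes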
a3 = tf.stack([a1, a2])
a3
<tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
array([[[ 0, 1],
[ 4, 5],
[ 8, 9]],
[[ 2, 3],
[ 6, 7],
[10, 11]]])>
tf.unstack(a3, axis=1)
[<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[0, 1],
[2, 3]])>,
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[4, 5],
[6, 7]])>,
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 8, 9],
[10, 11]])>]
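Unstacking along axis=0 is the inverse of the tf.stack call above; a brief sketch checking that it recovers a1 and a2.
b1, b2 = tf.unstack(a3, axis=0)
print(tf.reduce_all(b1 == a1), tf.reduce_all(b2 == a2))  # both tf.Tensor(True, shape=(), dtype=bool)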
Quick quiz: run the following code without an error.
a = tf.constant(((1, 2, 3), (1, 2, 3)))
b = tf.constant([1, 2, 3])
tf.matmul(a, b)
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-77-1d1bfe7e3e4f> in <module>
2 b = tf.constant([1, 2, 3])
3
----> 4 tf.matmul(a, b)
c:\Users\theo\miniconda3\envs\ds_study\lib\site-packages\tensorflow\python\util\traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
c:\Users\theo\miniconda3\envs\ds_study\lib\site-packages\tensorflow\python\framework\ops.py in raise_from_not_ok_status(e, name)
7105 def raise_from_not_ok_status(e, name):
7106 e.message += (" name: " + name if name is not None else "")
-> 7107 raise core._status_to_exception(e) from None # pylint: disable=protected-access
7108
7109
InvalidArgumentError: In[0] and In[1] has different ndims: [2,3] vs. [3] [Op:MatMul]
a, b
(<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[1, 2, 3]])>,
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3])>)
tf.expand_dims(b, 1)
<tf.Tensor: shape=(3, 1), dtype=int32, numpy=
array([[1],
[2],
[3]])>
tf.matmul(a, tf.expand_dims(b, 1))
<tf.Tensor: shape=(2, 1), dtype=int32, numpy=
array([[14],
[14]])>
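For reference, a few alternative fixes work just as well (a minimal sketch with the same a and b; output reprs omitted).
tf.matmul(a, tf.reshape(b, (3, 1)))  # same [[14], [14]] result
tf.matmul(a, b[:, tf.newaxis])       # tf.newaxis also adds the missing column axis
tf.linalg.matvec(a, b)               # rank-1 result [14, 14] instead of a column vector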