오늘은 오랜만에 NumPy를 배웠을 때 많이 헷갈려했던 Axis의 기억이 다시 떠오른 하루였다. PyTorch에도 dim이라는 텐서의 행과 열을 지정하는 속성이 있어서 조금 어지러웠지만 몇 번 다시 보니 금방 감을 잡았다.
import torch  # NOTE(review): torch is never imported anywhere in this file — needed by every snippet

# Scalar arithmetic broadcasts over every element of the tensor.
x = torch.ones(2, 2)
print(x)
# tensor([[1., 1.],
#         [1., 1.]])
x *= 2  # in-place scalar multiply
print(x)
# tensor([[2., 2.],
#         [2., 2.]])
x += 3  # in-place scalar add
print(x)
# tensor([[5., 5.],
#         [5., 5.]])
x = torch.tensor([[1, 2], [3, 4]])
y = torch.tensor([[4, 3], [2, 1]])

# Element-wise addition: the method form and the + operator are equivalent.
add = x.add(y)
print(add)
print(x + y)
# tensor([[5, 5],
#         [5, 5]])

# Element-wise subtraction.
sub = x.sub(y)
print(sub)
print(x - y)
# tensor([[-3, -1],
#         [ 1,  3]])

# Element-wise (Hadamard) product — not matrix multiplication.
mul = x.mul(y)
print(mul)
print(x * y)
# tensor([[4, 6],
#         [6, 4]])

# Element-wise division; integer inputs are promoted to float.
div = x.div(y)
print(div)
print(x / y)
# tensor([[0.2500, 0.6667],
#         [1.5000, 4.0000]])
x = torch.tensor([[5, 6], [7, 8]])
y = torch.tensor([[3, 1], [4, 2]])

# Methods ending in an underscore mutate the tensor in place;
# += dispatches to the same in-place add.
x += y
print(x)
# tensor([[ 8,  7],
#         [11, 10]])

# fill_ overwrites every element with the given value.
x.fill_(0)
print(x)
# tensor([[0, 0],
#         [0, 0]])
x = torch.tensor([[1.1, 2.2, 3.3], [4, 5, 6], [-1, -3.14, -6]])

# Element-wise unary ops: each returns a new tensor of the same shape.
print("절대값", x.abs())    # [[1.1, 2.2, 3.3], [4, 5, 6], [1, 3.14, 6]]
print("올림", x.ceil())     # [[2, 3, 4], [4, 5, 6], [-1, -3, -6]]
print("반올림", x.round())  # [[1, 2, 3], [4, 5, 6], [-1, -3, -6]]
print("내림", x.floor())    # [[1, 2, 3], [4, 5, 6], [-1, -4, -6]]

# Whole-tensor reductions collapse the result to a 0-d scalar tensor.
print("모든 원소의 합", x.sum())      # tensor(11.4600)
print("모든 원소의 곱", x.prod())     # tensor(-18054.7500)
print("최소값", x.min())              # tensor(-6.)
print("최대값", x.max())              # tensor(6.)
print("최소값의 인덱스", x.argmin())  # tensor(8) — index into the flattened tensor
print("최대값의 인덱스", x.argmax())  # tensor(5)
print("평균값", x.mean())             # tensor(1.2733)
print("표준편차", x.std())            # tensor(3.9719)

# unique() returns the distinct values in sorted order.
print("중복제거", torch.unique(torch.tensor([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4])))
# tensor([1, 2, 3, 4])

# clamp() limits every element to the range [min, max].
print("클램프", x.clamp(-2, 2))
# tensor([[ 1.1000,  2.0000,  2.0000],
#         [ 2.0000,  2.0000,  2.0000],
#         [-1.0000, -2.0000, -2.0000]])
# dim selects the axis that is reduced away:
# dim=0 collapses rows (one value per column), dim=1 collapses columns (one per row).
x = torch.tensor([[1.1, 2.2, 3.3], [4, 5, 6], [-1, -3.14, -6]])
print("열의 합", x.sum(dim=0))  # tensor([4.1000, 4.0600, 3.3000])
print("행의 합", x.sum(dim=1))  # tensor([ 6.6000, 15.0000, -10.1400])

# max along a dim returns a named tuple of (values, indices).
print("열의 최대값", x.max(dim=0))
# torch.return_types.max(
#   values=tensor([4., 5., 6.]),
#   indices=tensor([1, 1, 1]))
# True matrix multiplication (row·column dot products), unlike the element-wise
# * operator; x @ y is shorthand for torch.matmul(x, y).
x = torch.tensor([[1, 2], [3, 4]])
y = torch.tensor([[5, 6], [7, 8]])
print(x @ y)
# tensor([[19, 22],
#         [43, 50]])
# Basic indexing: a comma-separated pair addresses [row, col].
x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(x[1, 1])  # tensor(5)
print(x[2, 0])  # tensor(7)
print(x[0, :])  # row 0  -> tensor([1, 2, 3])
print(x[:, 2])  # col 2  -> tensor([3, 6, 9])

# Fancy indexing: paired index lists select (0,0), (1,1), (2,2).
x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(x[[0, 1, 2], [0, 1, 2]])  # tensor([1, 5, 9])

# Boolean indexing: a mask picks out matching elements as a 1-D tensor.
print(x[x > 5])  # tensor([6, 7, 8, 9])

# Assigning through an index or slice mutates the tensor in place.
x = torch.tensor([10, 20, 30, 40, 50])
x[0] = 100
print(x)  # tensor([100, 20, 30, 40, 50])
x[1:4] = torch.tensor([200, 300, 400])
print(x)  # tensor([100, 200, 300, 400, 50])
# Tensor.view(): reinterpret a tensor with a new shape without copying data.
# (The section heading was fused onto the code line in the original, which
# made it invalid Python; it is now a comment.)
x = torch.randn(4, 5)
print(x)
# e.g. tensor([[ 1.6556,  1.1775,  0.7074,  0.6514,  1.2222],
#              [-3.4192,  0.5839, -0.5892,  0.9749, -0.3656],
#              [-0.9102, -1.4960,  0.4842,  0.2422, -0.6213],
#              [-1.2977,  0.1044, -1.1074, -0.6357,  0.8306]])
y = x.view(20)  # flatten: 4*5 elements -> shape (20,)
print(y)
z = y.view(5, -1)  # -1 lets PyTorch infer the missing dimension (here 4)
print(z)
# Tensor.item(): extract the Python scalar — only valid on a one-element tensor.
# (Heading text was fused onto this code line in the original.)
x = torch.rand(1)
print(x)         # e.g. tensor([0.3532])   — 1-D tensor holding one element
print(x[0])      # e.g. tensor(0.3532)    — 0-d tensor
print(x.item())  # e.g. 0.3532150983810425 — plain Python float
# torch.squeeze(): remove every dimension of size 1.
# (Heading text was fused onto this code line in the original.)
t = torch.rand(3, 1, 3)
print(t)
print(t.shape)   # torch.Size([3, 1, 3])
ts = torch.squeeze(t)
print(ts)
print(ts.shape)  # torch.Size([3, 3]) — the middle size-1 dim is gone
# torch.unsqueeze(): insert a new size-1 dimension at the given dim.
# (Heading text was fused onto this code line in the original.)
t = torch.rand(3, 3, 3)
print(t.shape)    # torch.Size([3, 3, 3])
ts1 = torch.unsqueeze(t, dim=0)
print(ts1.shape)  # torch.Size([1, 3, 3, 3])
ts2 = torch.unsqueeze(t, dim=1)
print(ts2.shape)  # torch.Size([3, 1, 3, 3])
ts3 = torch.unsqueeze(t, dim=2)
print(ts3.shape)  # torch.Size([3, 3, 1, 3])
ts4 = torch.unsqueeze(t, dim=3)
print(ts4.shape)  # torch.Size([3, 3, 3, 1])
# torch.stack(): group same-shaped tensors along a NEW leading dimension.
# (Heading text was fused onto this code line in the original.)
x = torch.tensor([1, 4])
y = torch.tensor([2, 5])
z = torch.tensor([3, 6])
result = torch.stack([x, y, z])  # three (2,) tensors -> shape (3, 2)
print(result)
# tensor([[1, 4],
#         [2, 5],
#         [3, 6]])
a = torch.tensor([[1, 2], [3, 4]])
b = torch.tensor([[5, 6], [7, 8]])
result2 = torch.stack([a, b])  # two (2, 2) tensors -> shape (2, 2, 2)
print(result2)
# tensor([[[1, 2],
#          [3, 4]],
#         [[5, 6],
#          [7, 8]]])
# torch.cat(): concatenate tensors along an EXISTING dimension —
# unlike stack(), no new dimension is created.
# (Heading text was fused onto this code line in the original.)
a = torch.tensor([[1, 2], [3, 4], [5, 6]])
b = torch.tensor([[7, 8], [9, 10]])
result = torch.cat((a, b))  # default dim=0: (3,2) + (2,2) -> (5,2)
print(result)
# tensor([[ 1,  2],
#         [ 3,  4],
#         [ 5,  6],
#         [ 7,  8],
#         [ 9, 10]])
a = torch.tensor([[1, 2], [3, 4]])
b = torch.tensor([[7, 8], [9, 10]])
result2 = torch.cat((a, b), dim=1)  # side by side: (2,2) + (2,2) -> (2,4)
print(result2)
# tensor([[ 1,  2,  7,  8],
#         [ 3,  4,  9, 10]])
# torch.chunk(): split a tensor into the given NUMBER of pieces.
# (Heading text was fused onto this code line in the original.)
a = torch.rand(3, 7)
print(a)
a1, a2, a3 = torch.chunk(a, 3)  # default dim=0: three (1, 7) row slices
print(a1)
print(a2)
print(a3)
a = torch.rand(3, 7)
print(a)
# dim=1: 7 columns into 3 chunks -> widths 3, 3 and 1 (the last chunk is smaller).
a1, a2, a3 = torch.chunk(a, 3, dim=1)
print(a1)
print(a2)
print(a3)
# torch.split(): split by chunk SIZE (whereas chunk() splits by count).
# (Heading text was fused onto this code line in the original.)
a = torch.rand(3, 7)
print(a)
# Size 3 along dim=1: 7 columns split as 3, 3 and the remaining 1.
a1, a2, a3 = torch.split(a, 3, dim=1)
print(a1)
print(a2)
print(a3)
a = torch.rand(3, 7)
print(a)
# A list of sizes splits the 7 columns exactly as specified: 1, 1 and 5.
a1, a2, a3 = torch.split(a, [1, 1, 5], dim=1)
print(a1)
print(a2)
print(a3)
오늘로 PyTorch의 기본적인 내용은 끝났고 내일부터 AI를 본격적으로 찍먹해볼 것 같다.