This post is a summary of notes from 모두를 위한 딥러닝 2 (Deep Learning for Everyone 2).
Vector, Matrix and Tensor
2D Tensor
|t| = (batch size, dim)
3D Tensor
|t| = (batch size, width, height) for computer vision, or (batch size, length, dim) for sequence data
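As a quick sanity check of the 3D convention, the sketch below builds a tensor with an arbitrary batch size of 2, length 3, and dim 4 (the sizes are my own choice, just for illustration):

import torch

t = torch.rand(2, 3, 4)  # (batch size, length, dim), sizes chosen arbitrarily
print(t.dim())   # 3
print(t.shape)   # torch.Size([2, 3, 4])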
PyTorch Tensor
import torch

t = torch.FloatTensor([0., 1., 2., 3., 4., 5., 6.])
print(t)
# tensor([0., 1., 2., 3., 4., 5., 6.])
print('rank', t.dim())
print('shape', t.shape, t.size())
# rank 1
# shape torch.Size([7]) torch.Size([7])
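Indexing and slicing work the same way as in NumPy; a quick check on the tensor above:

print(t[0], t[1], t[-1])  # indexing
print(t[2:5], t[4:-1])    # slicing
'''
tensor(0.) tensor(1.) tensor(6.)
tensor([2., 3., 4.]) tensor([4., 5.])
'''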
Broadcasting
m1 = torch.FloatTensor([[3, 3]])
m2 = torch.FloatTensor([[2, 2]])
print(m1 + m2)
# tensor([[5., 5.]])

m1 = torch.FloatTensor([[1, 2]])
m2 = torch.FloatTensor([3])  # [3] is broadcast to [[3., 3.]]
print(m1 + m2)
# tensor([[4., 5.]])
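Broadcasting also applies when both operands need expanding; here a (1, 2) tensor and a (2, 1) tensor are each stretched to (2, 2) before the addition:

m1 = torch.FloatTensor([[1, 2]])    # shape (1, 2)
m2 = torch.FloatTensor([[3], [4]])  # shape (2, 1)
print(m1 + m2)
'''
tensor([[4., 5.],
        [5., 6.]])
'''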
Multiplication & Matrix Multiplication
matmul() performs matrix multiplication, while * and mul() perform element-wise multiplication with broadcasting.
m1 = torch.FloatTensor([[1, 2], [3, 4]])
m2 = torch.FloatTensor([[1], [2]])
print('shape m1 :', m1.shape)
print('shape m2 :', m2.shape)
print(m1.matmul(m2))
'''
shape m1 : torch.Size([2, 2])
shape m2 : torch.Size([2, 1])
tensor([[ 5.],
        [11.]])
'''
m1 = torch.FloatTensor([[1, 2], [3, 4]])
m2 = torch.FloatTensor([[1], [2]])
print('shape m1 :', m1.shape)
print('shape m2 :', m2.shape)
print(m1 * m2)      # m2 is broadcast to [[1., 1.], [2., 2.]]
print(m1.mul(m2))
'''
shape m1 : torch.Size([2, 2])
shape m2 : torch.Size([2, 1])
tensor([[1., 2.],
        [6., 8.]])
tensor([[1., 2.],
        [6., 8.]])
'''
Mean
mean() with no arguments averages every element; mean(dim=n) averages along dimension n and removes it from the result (dim=-1 means the last dimension).
t = torch.FloatTensor([1, 2])
print(t.mean())
'''
tensor(1.5000)
'''
t = torch.FloatTensor([[1, 2], [3, 4]])
print(t.mean())
print(t.mean(dim=0))
print(t.mean(dim=1))
print(t.mean(dim=-1))
'''
tensor(2.5000)
tensor([2., 3.])
tensor([1.5000, 3.5000])
tensor([1.5000, 3.5000])
'''
Sum
t = torch.FloatTensor([[1, 2], [3, 4]])
print(t.sum())
print(t.sum(dim=0))
print(t.sum(dim=1))
print(t.sum(dim=-1))
'''
tensor(10.)
tensor([4., 6.])
tensor([3., 7.])
tensor([3., 7.])
'''
Max, Argmax
t = torch.FloatTensor([[1, 2], [3, 4]])
print(t.max())
'''
tensor(4.)
'''
print(t.max(dim=0))
print('Max: ', t.max(dim=0)[0])
print('Argmax: ', t.max(dim=0)[1])
'''
torch.return_types.max(
values=tensor([3., 4.]),
indices=tensor([1, 1]))
Max: tensor([3., 4.])
Argmax: tensor([1, 1])
'''
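If only the indices are needed, argmax can be called directly (a small addition beyond the lecture code):

print(t.argmax(dim=0))
# tensor([1, 1])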
View (reshape)
import numpy as np

t = np.array([[[0, 1, 2],
               [3, 4, 5]],
              [[6, 7, 8],
               [9, 10, 11]]])
ft = torch.FloatTensor(t)
print(ft.shape)
print(ft.view([-1, 3]))
print(ft.view([-1, 3]).shape)
'''
torch.Size([2, 2, 3])
tensor([[ 0.,  1.,  2.],
        [ 3.,  4.,  5.],
        [ 6.,  7.,  8.],
        [ 9., 10., 11.]])
torch.Size([4, 3])
'''
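view can also keep three dimensions; with -1 PyTorch infers the remaining size (same ft as above):

print(ft.view([-1, 1, 3]))
print(ft.view([-1, 1, 3]).shape)
'''
tensor([[[ 0.,  1.,  2.]],

        [[ 3.,  4.,  5.]],

        [[ 6.,  7.,  8.]],

        [[ 9., 10., 11.]]])
torch.Size([4, 1, 3])
'''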
Squeeze, Unsqueeze
ft = torch.FloatTensor([[[0]], [[1]], [[2]]])
print(ft)
print(ft.shape)
print(ft.squeeze())
print(ft.squeeze().shape)
'''
tensor([[[0.]],

        [[1.]],

        [[2.]]])
torch.Size([3, 1, 1])
tensor([0., 1., 2.])
torch.Size([3])
'''
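squeeze also accepts a specific dimension and removes it only when its size is 1 (my own check, not in the lecture):

print(ft.squeeze(0).shape)  # dim 0 has size 3, so nothing changes: torch.Size([3, 1, 1])
print(ft.squeeze(1).shape)  # dim 1 has size 1, so it is removed: torch.Size([3, 1])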
ft = torch.Tensor([0, 1, 2])
print(ft.shape)
print(ft.unsqueeze(0)) #dim=0
print(ft.unsqueeze(0).shape)
'''
torch.Size([3])
tensor([[0., 1., 2.]])
torch.Size([1, 3])
'''
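unsqueeze works at any position, including -1 for the last one:

print(ft.unsqueeze(1).shape)   # torch.Size([3, 1])
print(ft.unsqueeze(-1).shape)  # torch.Size([3, 1])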
Type Casting
lt = torch.LongTensor([1, 2, 3, 4])
print(lt)
print(lt.float())
'''
tensor([1, 2, 3, 4])
tensor([1., 2., 3., 4.])
'''
bt = torch.ByteTensor([True, False, False, True])
print(bt)
print(bt.long())
print(bt.float())
'''
tensor([1, 0, 0, 1], dtype=torch.uint8)
tensor([1, 0, 0, 1])
tensor([1., 0., 0., 1.])
'''
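Comparisons produce boolean tensors that cast the same way (note: recent PyTorch versions return torch.bool here rather than torch.uint8; this snippet is my own addition):

lt = torch.LongTensor([1, 2, 3, 4])
mask = lt == 3
print(mask)
print(mask.long())
'''
tensor([False, False,  True, False])
tensor([0, 0, 1, 0])
'''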
Concatenate & Stacking
x = torch.FloatTensor([[1, 2], [3, 4]])
y = torch.FloatTensor([[5, 6], [7, 8]])
print(torch.cat([x, y], dim=0))
print(torch.cat([x, y], dim=1))
'''
tensor([[1., 2.],
        [3., 4.],
        [5., 6.],
        [7., 8.]])
tensor([[1., 2., 5., 6.],
        [3., 4., 7., 8.]])
'''
x = torch.FloatTensor([1, 4])
y = torch.FloatTensor([2, 5])
z = torch.FloatTensor([3, 6])
print(torch.stack([x, y, z]))
print(torch.stack([x, y, z], dim=1))
'''
tensor([[1., 4.],
        [2., 5.],
        [3., 6.]])
tensor([[1., 2., 3.],
        [4., 5., 6.]])
'''
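stack is shorthand for unsqueezing each tensor and concatenating; the following line gives the same result as torch.stack([x, y, z]):

print(torch.cat([x.unsqueeze(0), y.unsqueeze(0), z.unsqueeze(0)], dim=0))
'''
tensor([[1., 4.],
        [2., 5.],
        [3., 6.]])
'''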
Ones and Zeros
x = torch.FloatTensor([[0, 1, 2], [2, 1, 0]])
print(x)
print(torch.ones_like(x))
print(torch.zeros_like(x))
'''
tensor([[0., 1., 2.],
        [2., 1., 0.]])
tensor([[1., 1., 1.],
        [1., 1., 1.]])
tensor([[0., 0., 0.],
        [0., 0., 0.]])
'''
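ones_like and zeros_like also inherit the dtype and device of the source tensor, which matters when x lives on a GPU (a quick check, beyond the lecture):

print(torch.ones_like(x).dtype)   # torch.float32, same as x
print(torch.zeros_like(x).shape)  # torch.Size([2, 3])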
In-place Operation
mul() leaves x unchanged after it runs, but after mul_() x itself has been modified.
x = torch.FloatTensor([[1, 2], [3, 4]])
print(x.mul(2.))
print(x)
print(x.mul_(2.))
print(x)
'''
tensor([[2., 4.],
        [6., 8.]])
tensor([[1., 2.],
        [3., 4.]])
tensor([[2., 4.],
        [6., 8.]])
tensor([[2., 4.],
        [6., 8.]])
'''
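The trailing underscore is a general PyTorch convention: most operations have an in-place variant such as add_() or sub_() (my note, not from the lecture):

x = torch.FloatTensor([[1, 2], [3, 4]])
x.add_(1.)  # adds 1 to every element of x in place
print(x)
'''
tensor([[2., 3.],
        [4., 5.]])
'''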