

# --- 0-dimensional (scalar) tensor basics ---
import torch

x = torch.tensor(1)  # Python int -> 0-dim tensor, dtype inferred as int64
print('x=: ', x)
print('dtype:', x.dtype)
print('dim:', x.dim())            # 0: a scalar tensor
print('shape:', x.shape)          # torch.Size([]) for a scalar
print('data:', x.data)
print('number of elements:', x.numel())
print('convert to numpy:', x.numpy())
print('only one element:', x.item())  # .item() extracts the Python scalar
# output:
#   x=: tensor(1)
#   dtype: torch.int64
#   dim: 0
#   shape: torch.Size([])
#   data: tensor(1)
#   number of elements: 1
#   convert to numpy: 1
#   only one element: 1
# Float literals infer float32; dtype= forces a specific type (double == float64).
y = torch.tensor(1.0)
print(y)
print(y.dtype)
# output:
#   tensor(1.)
#   torch.float32

z = torch.tensor(1, dtype=torch.double)
print(z)
print(z.dtype)
# output:
#   tensor(1., dtype=torch.float64)
#   torch.float64
# Rebinding the name `x` creates a brand-new tensor object (different id);
# assigning to `.data` instead mutates the existing tensor in place.
x = torch.tensor(1)
print('Before : id(x) = ', id(x))
x = torch.tensor(10)   # new object, new id
print('After: id(x)=', id(x))
# output (object ids vary between runs):
#   Before : id(x) =  4363833328
#   After: id(x)= 4363847072

y = torch.tensor(1.0)
y.data = torch.tensor(20)  # in-place replacement of the underlying data
# NOTE: `y.data` can only be used once `y` itself has been defined.
# --- 1-D tensor from a Python list, and conversion to NumPy ---
data = [1, 2, 3, 4, 5]
x = torch.tensor(data)
print('x=:', x)
print('size:', x.shape)
print('dimension:', x.dim())
print('number of elements:', x.numel())
# output:
#   x=: tensor([1, 2, 3, 4, 5])
#   size: torch.Size([5])
#   dimension: 1
#   number of elements: 5

y = x.numpy()
print('type:', type(y))
print('size:', y.shape)
# output:
#   type: <class 'numpy.ndarray'>
#   size: (5,)
# A tensor of any dimensionality can be converted to NumPy. NumPy only
# works on the CPU, though, so the conversion fails for a tensor on a GPU.
import numpy as np

# BUG FIX: the array was created as `array` but referenced as `array1`
# everywhere below, raising NameError; use one consistent name.
array1 = np.array([1, 2, 3, 4, 5])
print('arrary1 type:', type(array1))

# torch.tensor() COPIES the NumPy data into a new tensor.
x = torch.tensor(array1)
print('x=:', x)
print('type of x:', type(x))
print('data type:', x.dtype)
print('shape of x :', x.shape)
# output:
#   arrary1 type: <class 'numpy.ndarray'>
#   x=: tensor([1, 2, 3, 4, 5])
#   type of x: <class 'torch.Tensor'>
#   data type: torch.int64
#   shape of x : torch.Size([5])

# torch.from_numpy() SHARES memory with the source array (no copy).
y = torch.from_numpy(array1)
print('y=:', y)
print('type of y:', y.dtype)
print('size of y:', y.size())
# output:
#   y=: tensor([1, 2, 3, 4, 5])
#   type of y: torch.int64
#   size of y: torch.Size([5])

# Round-trip back to NumPy.
x = y.numpy()
print('x=:', x)
print('type of x:', type(x))
# output:
#   x=: [1 2 3 4 5]
#   type of x: <class 'numpy.ndarray'>

# Re-create tensors from the array with explicit dtypes.
x = y.numpy()
y = torch.tensor(x, dtype=torch.int)
print('y=:', y)
print('data type:', y.dtype)
# output:
#   y=: tensor([1, 2, 3, 4, 5], dtype=torch.int32)
#   data type: torch.int32

z = torch.tensor(x, dtype=torch.float32)
print('z=:', z)
print('data type:', z.dtype)
# output:
#   z=: tensor([1., 2., 3., 4., 5.])
#   data type: torch.float32
# --- 2-D tensors from nested lists; one float promotes the whole tensor ---
data_a = [[1, 2, 3, 4, 5, 6]]        # 1x6, all ints -> int64
data_b = [[1., 2., 3.], [4, 5, 6]]   # 2x3, a single float -> float32
a = torch.tensor(data_a)
b = torch.tensor(data_b)
print('a=:', a)
print('b=:', b)
print('dimension of a:', a.dim())
print('dimension of b:', b.dim())
print('size of a:', a.shape)
print('size of b:', b.shape)
print('number of elements: a', a.numel())
print('number of elements: b', b.numel())
print('data type of a:', a.dtype)
print('data type of b:', b.dtype)
# output:
#   a=: tensor([[1, 2, 3, 4, 5, 6]])
#   b=: tensor([[1., 2., 3.],
#               [4., 5., 6.]])
#   dimension of a: 2
#   dimension of b: 2
#   size of a: torch.Size([1, 6])
#   size of b: torch.Size([2, 3])
#   number of elements: a 6
#   number of elements: b 6
#   data type of a: torch.int64
#   data type of b: torch.float32
# --- Internal layout: stride, storage, offset ---
x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.float32)
print('x=:', x)
print('stride:', x.stride())                 # (3, 1): row-major layout
print('storage:', x.storage())               # flat 1-D buffer behind the tensor
print('storage offset:', x.storage_offset()) # first element sits at offset 0
print('data ptr:', x.storage().data_ptr())   # raw address (varies per run)
# output:
#   x=: tensor([[1., 2., 3.],
#               [4., 5., 6.]])
#   stride: (3, 1)
#   storage: 1.0 2.0 3.0 4.0 5.0 6.0
#   [torch.storage.TypedStorage(dtype=torch.float32, device=cpu) of size 6]
#   storage offset: 0
#   data ptr: 4805725440
# --- Filling with zeros and ones ---
x1 = torch.zeros(6)
x2 = torch.zeros(size=(2, 3))
print('x1=:', x1)
print('x2=:', x2)
y1 = torch.ones(6)
y2 = torch.ones(size=(2, 3))
print('y1=:', y1)
print('y2=:', y2)
# output:
#   x1=: tensor([0., 0., 0., 0., 0., 0.])
#   x2=: tensor([[0., 0., 0.], [0., 0., 0.]])
#   y1=: tensor([1., 1., 1., 1., 1., 1.])
#   y2=: tensor([[1., 1., 1.], [1., 1., 1.]])

# *_like variants copy the shape (and dtype/device) of an existing tensor.
z1 = torch.zeros_like(x1)
z2 = torch.ones_like(x2)
print('z1=:', z1)
print('z2=:', z2)
# output:
#   z1=: tensor([0., 0., 0., 0., 0., 0.])
#   z2=: tensor([[1., 1., 1.], [1., 1., 1.]])

# In-place zeroing: the trailing underscore marks an in-place operation.
w = torch.ones(6).zero_()
print('w=:', w)
y1.zero_()
print('y1=:', y1)
# output:
#   w=: tensor([0., 0., 0., 0., 0., 0.])
#   y1=: tensor([0., 0., 0., 0., 0., 0.])
# NOTE(review): the original note claimed y1.zero_() is equivalent to
# torch.zero(y1); there is no torch.zero() — the in-place method is
# Tensor.zero_(), while torch.zeros() creates a new tensor instead.
# --- empty / fill_ / full ---
x = torch.empty((2, 3))  # uninitialized memory: contents are arbitrary
print('x=:', x)
# output (happened to be zeros here, but any garbage values are possible):
#   x=: tensor([[0., 0., 0.], [0., 0., 0.]])

torch.fill_(x, 0.0)      # fill in place with a constant
print('x=:', x)
# output:
#   x=: tensor([[0., 0., 0.], [0., 0., 0.]])

y = torch.full((2, 3), 0.0)   # new tensor pre-filled with a value
print('y=:', y)
z = torch.full_like(y, 1.0)   # same shape/dtype as y, filled with 1.0
print('z=:', z)
# output:
#   y=: tensor([[0., 0., 0.], [0., 0., 0.]])
#   z=: tensor([[1., 1., 1.], [1., 1., 1.]])
# --- Ranges: arange vs linspace ---
x1 = np.arange(5)
y1 = torch.arange(end=5)                       # 0, 1, ..., 4
y2 = torch.arange(start=1, end=10)             # 1 .. 9
y3 = torch.arange(start=1, end=10, step=2)     # 1, 3, 5, 7, 9
z1 = torch.linspace(start=0, end=10, steps=2)  # 2 evenly spaced points
z2 = torch.linspace(start=0, end=10, steps=5)  # 5 evenly spaced points
print('x1=:', x1)
print('y1=:', y1)
print('y2=:', y2)
print('y3=:', y3)
print('z1=:', z1)
print('z2=:', z2)
# output:
#   x1=: [0 1 2 3 4]
#   y1=: tensor([0, 1, 2, 3, 4])
#   y2=: tensor([1, 2, 3, 4, 5, 6, 7, 8, 9])
#   y3=: tensor([1, 3, 5, 7, 9])
#   z1=: tensor([ 0., 10.])
#   z2=: tensor([ 0.0000, 2.5000, 5.0000, 7.5000, 10.0000])
# IMPORTANT: torch.arange() EXCLUDES the end value, while torch.linspace()
# INCLUDES it.
# --- Uniform random tensors (reproducible via manual_seed) ---
torch.set_printoptions(precision=2)  # print floats with 2 decimals
torch.manual_seed(45)                # fix the RNG for reproducibility
x1 = torch.empty(6)                  # uninitialized, NOT random (no RNG draw)
x2 = torch.rand(6)                   # uniform on [0, 1)
x3 = torch.rand_like(x2)             # same shape as x2, fresh uniform draw
print('x1=:', x1)
print('x2=:', x2)
print('x3=:', x3)
# output:
#   x1=: tensor([0., 0., 0., 0., 0., 0.])   (empty(): values are arbitrary)
#   x2=: tensor([0.19, 0.96, 0.68, 0.90, 0.05, 0.56])
#   x3=: tensor([0.79, 0.06, 0.78, 0.15, 0.04, 0.10])
# --- Other random generators: permutation, integers, Gaussian ---
x1 = torch.empty(6)
y1 = torch.randperm(4)                        # random permutation of 0..3
y2 = torch.randint(high=5, size=(10, ))       # ints drawn from [0, 5)
y3 = torch.randint_like(x1, high=5)           # same shape/dtype as x1
z1 = torch.normal(mean=2, std=3, size=(10, )) # Gaussian N(2, 3) draws
print('y1=:', y1)
print('y2=:', y2)
print('y3=:', y3)
print('z1=:', z1)
z1.normal_(1, 5)  # refill z1 in place from N(1, 5)
print('z1=:', z1)
# output (values depend on the RNG state):
#   y1=: tensor([3, 1, 2, 0])
#   y2=: tensor([1, 3, 4, 2, 2, 4, 3, 0, 3, 4])
#   y3=: tensor([2., 2., 4., 1., 3., 4.])
#   z1=: tensor([ 3.46, 1.93, 0.87, -3.44, 6.13, 4.60, -2.58, -1.23, 0.33, -0.27])
#   z1=: tensor([-0.43, -3.03, 1.79, 2.51, -0.49, 1.76, 6.27, 6.18, 6.22, 7.92])
# --- Legacy typed constructors: IntTensor / FloatTensor ---
# N = number of images, C = channels (1 for grayscale, 3 for color),
# H and W = number of rows and columns.
N, C, H, W = 2, 1, 3, 4
x = torch.IntTensor(N, C, H, W).zero_()  # uninitialized int32, then zeroed
print('x=:', x)
print('size of x:', x.shape)
# output:
#   x=: tensor([[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]],
#               [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]],
#              dtype=torch.int32)
#   size of x: torch.Size([2, 1, 3, 4])

y1 = torch.FloatTensor(6)                    # size only -> uninitialized values
y2 = torch.FloatTensor([0, 1, 2, 3, 4, 5])   # list -> copies the values
y3 = torch.FloatTensor(N, C, H, W).fill_(0)  # allocate, then fill in place
print('y1=:', y1)
print('y2=:', y2)
print('y3=:', y3)
# output (y1 happened to be zeros; empty storage may hold anything):
#   y1=: tensor([0., 0., 0., 0., 0., 0.])
#   y2=: tensor([0., 1., 2., 3., 4., 5.])
#   y3=: tensor([[[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]],
#                [[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]])
# --- dtype conversion and the new_* factory methods ---
x1 = torch.tensor([1, 2, 3, 4], dtype=torch.int)
x2 = torch.tensor([1, 2, 3, 4]).float()           # cast via .float()
x3 = torch.tensor([1, 2, 3, 4]).to(torch.double)  # cast via .to()
y1 = x1.float()
y2 = x1.to(torch.double)

# new_* methods inherit dtype and device from the source tensor.
x = torch.tensor(())  # empty float32 tensor
y = x.new_tensor(data=[1, 2])
z = x.new_zeros((5,))
s = x.new_ones((5,))
t = x.new_full((5,), 10.0)
print('x=:', x, 'x.dtype:', x.dtype, 'size :', x.shape)
print('y=:', y, 'y.dtype:', y.dtype, 'size:', y.shape)
print('z=:', z, 'z.dtype:', z.dtype, 'size:', z.shape)
print('s=:', s, 's.dtype:', s.dtype, 'size:', s.shape)
print('t=:', t, 't.dtype:', t.dtype, 'size :', t.shape)
# output:
#   x=: tensor([]) x.dtype: torch.float32 size : torch.Size([0])
#   y=: tensor([1., 2.]) y.dtype: torch.float32 size: torch.Size([2])
#   z=: tensor([0., 0., 0., 0., 0.]) z.dtype: torch.float32 size: torch.Size([5])
#   s=: tensor([1., 1., 1., 1., 1.]) s.dtype: torch.float32 size: torch.Size([5])
#   t=: tensor([10., 10., 10., 10., 10.]) t.dtype: torch.float32 size : torch.Size([5])
# NOTE(review): the original note called x a "scalar tensor of size 0";
# it is actually a 1-D tensor with zero elements (shape torch.Size([0])),
# not a 0-dim scalar.
# --- Copying tensors: copy_, clone, detach ---
x = torch.tensor([1, 2, 3, 4, 5])
y1 = torch.zeros_like(x)
print('y1=:', y1)
y1.copy_(x)     # in-place element copy into an existing tensor
print('y1=:', y1)
z = x.clone()   # new tensor with its own storage
print('z=:', z)
w = x.detach()  # shares storage with x; detached from the autograd graph
print('w=:', w)
# output:
#   y1=: tensor([0, 0, 0, 0, 0])
#   y1=: tensor([1, 2, 3, 4, 5])
#   z=: tensor([1, 2, 3, 4, 5])
#   w=: tensor([1, 2, 3, 4, 5])
# --- GPU / device handling ---
# BUG FIX: `os` was used without being imported anywhere in this file.
# Also guard every CUDA-only call with is_available() so the script still
# runs (taking the CPU path) on machines without a GPU.
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict visibility to GPU 0
print(torch.cuda.is_available())

if torch.cuda.is_available():
    # Build tensors directly on the GPU, or move CPU tensors over with .to().
    x_gpu = torch.tensor([1, 2, 3, 4], device='cuda')
    y_gpu = torch.tensor([1, 2, 3, 4]).to(device='cuda')
    x_cpu = torch.tensor([1, 2, 3, 4])
    x_gpu = x_cpu.to(device='cuda:0')

# Portable device selection: prefer CUDA, fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
x = torch.tensor([[1, 2, 3], [4, 5, 6]]).to(device)
print('x=:', x)
print('cuda?', x.is_cuda)

if torch.cuda.is_available():
    # CUDA memory bookkeeping — only meaningful when a GPU is present.
    allocated = torch.cuda.memory_allocated()
    reserved = torch.cuda.memory_reserved()
    print(f"{allocated} bytes allocated")
    print("reserved={} byte = {} KB".format(reserved, (reserved / 1024)))
    summarized = torch.cuda.memory_summary(device=device, abbreviated=True)
    print('memory summary:', summarized)
