PyTorch로 시작하는 딥러닝 입문 (Introduction to Deep Learning with PyTorch) book
Recommended setup: 1) In Colab, mount your Google Drive and complete authentication
from google.colab import drive
drive.mount('/content/drive')
2) Run cloudflared
from colab_ssh import launch_ssh_cloudflared, init_git_cloudflared
launch_ssh_cloudflared(password="cho7611278")
3) Connect from VS Code
- Cmd+Shift+P → Connect to Host → rank-outdoor-rt-rise.trycloudflare.com, then enter the password
Arithmetic on tensors triggers broadcasting or element-wise computation; the same thing can be done with the mul function (a quick sketch follows the argmax example below).
y = torch.randint(5, (10, 5))
print(y)
'''
tensor([[2, 4, 2, 4, 4],
        [4, 4, 0, 1, 2],
        [2, 1, 3, 3, 0],
        [4, 3, 1, 3, 0],
        [4, 1, 2, 2, 4],
        [2, 2, 4, 3, 2],
        [1, 0, 0, 4, 4],
        [4, 3, 4, 3, 1],
        [3, 4, 3, 1, 1],
        [1, 2, 2, 3, 1]])
'''
y_label = y.argmax(dim=1)
y_label
'''
tensor([1, 0, 2, 4, 0, 0, 0, 4, 2, 2])
'''
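A minimal sketch of the broadcasting / element-wise behavior mentioned above (the tensors here are my own illustration, not from the original notes):
a = torch.tensor([[1., 2.], [3., 4.]])
b = torch.tensor([10., 100.])  # shape (2,) broadcasts across the rows of the (2, 2) tensor
print(a * b)           # element-wise multiply with broadcasting
print(torch.mul(a, b)) # same result via the mul function
'''
tensor([[ 10., 200.],
        [ 30., 400.]])
tensor([[ 10., 200.],
        [ 30., 400.]])
'''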
The one_hot function can also be used.
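A minimal sketch applying torch.nn.functional.one_hot to the labels above (my own example, assuming PyTorch >= 1.1 where one_hot exists):
import torch.nn.functional as F
one_hot = F.one_hot(y_label, num_classes=5)  # (10,) int64 labels -> (10, 5) one-hot matrix
print(one_hot[0])  # y_label[0] was 1
'''
tensor([0, 1, 0, 0, 0])
'''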
The autograd functions are used frequently.
Find the derivative of z with respect to w, where y = w^2 and z = 2y + 5:
w = torch.tensor(2.0, requires_grad=True)
y = w**2
z = 2*y + 5
z.backward()
w.grad
'''
tensor(8.) # dz/dw = 4*w -> w = 2 -> 8
'''
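One autograd detail worth remembering: .grad accumulates across backward() calls unless it is cleared, which is why the training loop below calls optimizer.zero_grad(). A minimal sketch (my own example):
w = torch.tensor(2.0, requires_grad=True)
for _ in range(2):
    z = 2 * w**2 + 5
    z.backward()
print(w.grad)  # 8 + 8: the second backward() added onto the first gradient
'''
tensor(16.)
'''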
import numpy as np
# create dummy data for training
x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1)
y_values = [2*i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
import torch
from torch.autograd import Variable  # Variable is deprecated since PyTorch 0.4; plain tensors work the same way
class LinearRegression(torch.nn.Module):
    def __init__(self, inputSize, outputSize):
        super(LinearRegression, self).__init__()
        self.linear = torch.nn.Linear(inputSize, outputSize)

    def forward(self, x):
        out = self.linear(x)
        return out
inputDim = 1 # takes variable 'x'
outputDim = 1 # takes variable 'y'
learningRate = 0.01
epochs = 100
model = LinearRegression(inputDim, outputDim)
##### For GPU #######
if torch.cuda.is_available():
    model.cuda()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learningRate)
for epoch in range(epochs):
    # Converting inputs and labels to Variable
    if torch.cuda.is_available():
        inputs = Variable(torch.from_numpy(x_train).cuda())
        labels = Variable(torch.from_numpy(y_train).cuda())
    else:
        inputs = Variable(torch.from_numpy(x_train))
        labels = Variable(torch.from_numpy(y_train))

    # Clear gradient buffers so gradients from the previous epoch don't carry forward; we don't want to accumulate gradients
    optimizer.zero_grad()

    # get output from the model, given the inputs
    outputs = model(inputs)

    # get loss for the predicted output
    loss = criterion(outputs, labels)
    print(loss)

    # get gradients w.r.t. the parameters
    loss.backward()

    # update parameters
    optimizer.step()

    print('epoch {}, loss {}'.format(epoch, loss.item()))
'''
...
epoch 98, loss 0.19512228667736053
tensor(0.1929, device='cuda:0', grad_fn=<MseLossBackward>)
epoch 99, loss 0.1929432600736618
'''
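Since the data was generated from y = 2x + 1, the learned weight and bias should end up close to 2 and 1. A quick check (my own addition; the values below are inferred from the predictions printed further down, so the exact output may differ):
for name, param in model.named_parameters():
    print(name, param.data)
'''
linear.weight tensor([[2.1177]], device='cuda:0')
linear.bias tensor([0.1829], device='cuda:0')
'''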
with torch.no_grad():  # we don't need gradients in the testing phase
    if torch.cuda.is_available():
        predicted = model(Variable(torch.from_numpy(x_train).cuda())).cpu().data.numpy()
    else:
        predicted = model(Variable(torch.from_numpy(x_train))).data.numpy()
    print(predicted)
'''
[[ 0.1829003]
[ 2.3005698]
[ 4.4182396]
[ 6.535909 ]
[ 8.653579 ]
[10.771248 ]
[12.888918 ]
[15.006587 ]
[17.124256 ]
[19.241926 ]
[21.359596 ]]
'''
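A common follow-up is plotting the fit against the training data; a minimal matplotlib sketch (not in the original notes):
import matplotlib.pyplot as plt
plt.plot(x_train, y_train, 'go', label='True data')
plt.plot(x_train, predicted, '--', label='Predictions')
plt.legend(loc='best')
plt.show()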
BoostCamp AI Tech