This post was written after taking the FastCampus course '[skill-up] 처음부터 시작하는 딥러닝 유치원', with additional notes from my own study added.
Model: y = xW + b
Goal: find the weights W and bias b that minimize the error between the true y and the prediction ŷ.

This is done by repeating a gradient-descent update on W and b many times (e.g., ×1,000 iterations).
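A minimal sketch of a single such update, written with plain PyTorch tensors; the toy data and the learning rate here are made up for illustration and are not part of the course code:

import torch

# made-up toy data: 4 samples, 3 input features, 1 target
x = torch.randn(4, 3)
y = torch.randn(4, 1)

# parameters to learn
W = torch.randn(3, 1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)

y_hat = x @ W + b                 # prediction: y_hat = xW + b
loss = ((y_hat - y) ** 2).mean()  # mean squared error between y and y_hat
loss.backward()                   # compute d(loss)/dW and d(loss)/db

with torch.no_grad():             # one gradient-descent step
    W -= 1e-3 * W.grad
    b -= 1e-3 * b.grad

Repeating this update is exactly what nn.Linear and optim.SGD automate in the code below.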
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml

# Load the Boston housing dataset from OpenML
boston = fetch_openml(name='boston', version=1, as_frame=True)
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df["TARGET"] = boston.target
# df.tail()
# sns.pairplot(df)
# plt.show()

# Select a subset of features; TARGET is the value to predict
cols = ["TARGET", "INDUS", "RM", "LSTAT", "NOX", "DIS"]
# df[cols].describe()
# sns.pairplot(df[cols])
# plt.show()

# Convert to a float tensor and split into inputs x and target y
data = torch.from_numpy(df[cols].values).float()
print(data.shape)

x = data[:, 1:]   # input features (all columns except TARGET)
y = data[:, :1]   # target (first column)
print(x.shape, y.shape)
n_epochs = 2000        # number of training iterations
learning_rate = 1e-3
print_interval = 100

# x.shape: (batch_size, input features)
# y.shape: (batch_size, output targets)
model = nn.Linear(x.size(-1), y.size(-1))  # args: (input features, output features)

optimizer = optim.SGD(model.parameters(),  # stochastic gradient descent (SGD)
                      lr=learning_rate)
for i in range(n_epochs):
    y_hat = model(x)
    loss = F.mse_loss(y_hat, y)

    optimizer.zero_grad()  # reset gradients; otherwise they accumulate across iterations
    loss.backward()        # compute gradients of the loss w.r.t. the parameters
    optimizer.step()       # let the optimizer update the parameters with those gradients

    if (i + 1) % print_interval == 0:
        print('Epoch %d: loss=%.4e' % (i + 1, float(loss)))
# Compare the ground truth y with the final predictions y_hat
result = pd.DataFrame(torch.cat([y, y_hat], dim=1).detach().numpy(),
                      columns=["y", "y_hat"])

sns.pairplot(result, height=3)
plt.show()
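Since the goal was to find W and b, the learned parameters can also be inspected after training. This is just a quick check using nn.Linear's weight and bias attributes, not part of the original course code:

print(model.weight)  # learned W, shape: (output features, input features)
print(model.bias)    # learned b, shape: (output features,)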