This post is based on the FastCampus course '[skill-up] 처음부터 시작하는 딥러닝 유치원' (Deep Learning Kindergarten, Starting from Scratch), supplemented with notes from my own further study.



[AI] Comparing with the Linear Model Used in LinearRegression


import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import StandardScaler

boston = fetch_openml(name='boston', version=1, as_frame=True)
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df["TARGET"] = boston.target
# fetch_openml returns CHAS and RAD as categorical columns; cast everything to float before scaling
df = df.astype(float)
# df.head()
# Data Standardization: standardize the input data → shift each feature to mean 0 and scale it to standard deviation 1
scaler = StandardScaler()
scaler.fit(df.values[:, :-1])
df.iloc[:,:-1] = scaler.transform(df.values[:, :-1]) # N(0,1)
# df.head()
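As a quick sanity check (my addition, not in the original code), each standardized feature should now have mean ≈ 0 and standard deviation ≈ 1, since StandardScaler applies z = (x − μ) / σ per column:

# Verify the standardization: per-feature mean ~ 0, std ~ 1
print(df.iloc[:, :-1].mean().abs().max())  # close to 0
print(df.iloc[:, :-1].std())               # close to 1 (pandas uses ddof=1, so not exactly 1)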
data = torch.from_numpy(df.values).float()
print(data.shape)  # torch.Size([506, 14]): 506 samples, 13 features + 1 target

# Split off the last column as the target y; the remaining columns are the inputs x
y = data[:, -1:]
x = data[:, :-1]
print(x.shape, y.shape)  # torch.Size([506, 13]) torch.Size([506, 1])
n_epochs = 100000       # number of full-batch gradient descent steps
learning_rate = 1e-4
print_interval = 5000   # log the loss every 5,000 epochs
# relu = nn.ReLU()
# leaky_relu = nn.LeakyReLU(0.1)
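For reference, a small sketch (my addition) of how these two activations differ: ReLU zeroes out negative inputs, while LeakyReLU(0.1) passes them through scaled by 0.1:

# ReLU vs. LeakyReLU(0.1) on a toy tensor
z = torch.tensor([-2.0, -0.5, 0.0, 1.0])
print(nn.ReLU()(z))          # tensor([0., 0., 0., 1.])
print(nn.LeakyReLU(0.1)(z))  # tensor([-0.2000, -0.0500, 0.0000, 1.0000])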
## Building the model myself as a custom nn.Module
class MyModel(nn.Module):

    def __init__(self, input_dim, output_dim):
        super().__init__()  # initialize nn.Module first, then register attributes

        self.input_dim = input_dim
        self.output_dim = output_dim

        self.linear1 = nn.Linear(input_dim, 3)
        self.linear2 = nn.Linear(3, 3)
        self.linear3 = nn.Linear(3, output_dim)
        self.act = nn.ReLU()

    def forward(self, x):
        # |x| = (batch_size, input_dim)
        h = self.act(self.linear1(x))  # |h| = (batch_size, 3)
        h = self.act(self.linear2(h))
        y = self.linear3(h)  # in a regression problem, activations go only between the layers, never on the output!
        # |y| = (batch_size, output_dim)
        return y
custom_model = MyModel(x.size(-1), y.size(-1))
print(custom_model)
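print(custom_model) lists the registered submodules. As an extra sketch (my addition), we can also count the trainable parameters: with input_dim=13 and output_dim=1, the three linear layers hold (13·3 + 3) + (3·3 + 3) + (3·1 + 1) = 58 of them:

# Count trainable parameters: 42 + 12 + 4 = 58 for this architecture
print(sum(p.numel() for p in custom_model.parameters() if p.requires_grad))  # 58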
## Building the model with nn.Sequential
sequential_model = nn.Sequential(
    nn.Linear(x.size(-1), 3),
    nn.LeakyReLU(),
    nn.Linear(3, 3),
    nn.LeakyReLU(),
    nn.Linear(3, 3),
    nn.LeakyReLU(),
    nn.Linear(3, 3),
    nn.LeakyReLU(),
    nn.Linear(3, 3),
    nn.LeakyReLU(),
    nn.Linear(3, y.size(-1)),
)
print(sequential_model)
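Note that this sequential version is deeper: five 3-unit hidden layers instead of two. Before training, a quick forward-pass smoke test (my addition) confirms that both models map a batch of 13-dimensional inputs to a single output each:

# Smoke test: both models should map (batch, 13) -> (batch, 1)
with torch.no_grad():
    print(custom_model(x[:4]).shape)      # torch.Size([4, 1])
    print(sequential_model(x[:4]).shape)  # torch.Size([4, 1])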
model = sequential_model  # pick one of the two models to train (custom_model works the same way)
optimizer = optim.SGD(model.parameters(),
                      lr=learning_rate)
for i in range(n_epochs):
    y_hat = model(x)             # forward pass on the full batch
    loss = F.mse_loss(y_hat, y)  # mean squared error against the targets

    optimizer.zero_grad()        # clear gradients from the previous step
    loss.backward()              # backpropagation
    optimizer.step()             # gradient descent update

    if (i + 1) % print_interval == 0:
        print('Epoch %d: loss=%.4e' % (i + 1, loss))
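Finally, to make the comparison the title promises concrete, here is a minimal sketch (my addition, assuming the comparison is MSE on the same standardized data) that fits scikit-learn's LinearRegression and compares its error with the trained network's:

# Compare the trained network against a plain linear model on the same data
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

lin_reg = LinearRegression().fit(x.numpy(), y.numpy())
lin_mse = mean_squared_error(y.numpy(), lin_reg.predict(x.numpy()))

with torch.no_grad():
    nn_mse = F.mse_loss(model(x), y).item()

print('LinearRegression MSE: %.4e' % lin_mse)
print('Neural network MSE:   %.4e' % nn_mse)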