
import pickle
import numpy as np

class Regression:
    # get_data, sigmoid, and softmax are defined elsewhere in this class (omitted here).

    def init_network(self):
        # Load the pretrained weights from sample_weight.pkl.
        with open("sample_weight.pkl", 'rb') as f:
            network = pickle.load(f)
        return network

    def predict(self, x):
        W1, W2, W3 = network['W1'], network['W2'], network['W3']
        b1, b2, b3 = network['b1'], network['b2'], network['b3']
        a1 = np.dot(x, W1) + b1
        z1 = self.sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        z2 = self.sigmoid(a2)
        a3 = np.dot(z2, W3) + b3
        y = self.softmax(a3)
        return y

    def regression(self, batch_size: int):
        x, t = self.get_data()
        network = self.init_network()
        accuracy_cnt = 0
        for i in range(0, len(x), batch_size):
            x_batch = x[i:i+batch_size]
            y_batch = self.predict(x_batch)
            p = np.argmax(y_batch, axis=1)  # index of the highest-probability class
            accuracy_cnt += np.sum(p == t[i:i+batch_size])
        print("Accuracy : " + str(float(accuracy_cnt) / len(x)))
Running the regression method on this class raises:
line 33, in predict
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
                 ^^^^^^^
NameError: name 'network' is not defined
network was defined before self.predict was called, so why does Python say it is not defined?
When regression calls self.predict:
  the predict body is pulled down into regression's local scope (X)
  execution jumps to the line where predict was defined and runs it in a fresh local scope (O)
→ so predict cannot see network = self.init_network(), which exists only as a local variable of regression

In other words, Python resolves a name inside a function by searching that function's own locals, any enclosing (lexical) scopes, the module globals, and finally the builtins; the caller's local variables are never part of that chain.
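The same rule can be seen without any class. In the sketch below (the function names caller and callee are made up for illustration), callee cannot see caller's local variable even though it is invoked from inside caller:

    def callee():
        # Lookup order for 'value': callee's locals -> enclosing defs -> globals -> builtins.
        # The caller's local variables are never searched.
        return value

    def caller():
        value = 42        # exists only in caller's local scope
        return callee()   # raises NameError: name 'value' is not defined

    caller()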
⋯ (omitted) ⋯
Fix 1: have predict load the network itself, so that network is a local name inside predict. The NameError disappears, but the pickle file is now re-read on every call to predict, i.e. once per batch:

    def predict(self, x):
        network = self.init_network()  # now a local variable of predict
        W1, W2, W3 = network['W1'], network['W2'], network['W3']
        b1, b2, b3 = network['b1'], network['b2'], network['b3']
        a1 = np.dot(x, W1) + b1
        z1 = self.sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        z2 = self.sigmoid(a2)
        a3 = np.dot(z2, W3) + b3
        y = self.softmax(a3)
        return y
⋯ (omitted) ⋯
Fix 2: load the network once in regression and pass it into predict as an argument:

    def predict(self, x, network):
        # network = self.init_network()  <- no longer needed here
        W1, W2, W3 = network['W1'], network['W2'], network['W3']
        b1, b2, b3 = network['b1'], network['b2'], network['b3']
        a1 = np.dot(x, W1) + b1
        z1 = self.sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        z2 = self.sigmoid(a2)
        a3 = np.dot(z2, W3) + b3
        y = self.softmax(a3)
        return y

    def regression(self, batch_size: int):
        x, t = self.get_data()
        accuracy_cnt = 0
        network = self.init_network()  # loaded once per run
        for i in range(0, len(x), batch_size):
            x_batch = x[i:i+batch_size]
            y_batch = self.predict(x_batch, network)
            p = np.argmax(y_batch, axis=1)
            accuracy_cnt += np.sum(p == t[i:i+batch_size])
        print("Accuracy : " + str(float(accuracy_cnt) / len(x)))