-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathneuralnet_backprop.py
More file actions
129 lines (101 loc) · 3.36 KB
/
neuralnet_backprop.py
File metadata and controls
129 lines (101 loc) · 3.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import numpy as np
def softmax(a):
    """Numerically stable softmax of a 1-D score vector.

    Subtracting the max before exponentiating avoids overflow in np.exp
    without changing the result (softmax is shift-invariant).
    """
    shifted = a - np.max(a)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
def softmax_batch(A):
    """Row-wise numerically stable softmax for a 2-D batch (one sample per row).

    Vectorized with broadcasting instead of np.apply_along_axis, which runs a
    Python-level loop over rows; the result is identical, just computed in one
    C-level pass. `keepdims=True` keeps the per-row max/sum as column vectors
    so they broadcast against each row.
    """
    shifted = A - A.max(axis=1, keepdims=True)  # shift per row for stability
    exp_A = np.exp(shifted)
    return exp_A / exp_A.sum(axis=1, keepdims=True)
def cross_entroy_error(y_pred, y):
    """Cross-entropy loss for classification, summed over all elements.

    y is expected one-hot, so only the predicted probability of the true
    class contributes to the sum.
    """
    eps = 1e-7  # tiny constant keeping log() away from log(0)
    return -np.sum(y * np.log(y_pred + eps))
def cross_entropy_error_batch(y_pred, y):
    """Total cross-entropy of the batch divided by the batch size."""
    eps = 1e-7  # tiny constant keeping log() away from log(0)
    total = -np.sum(y * np.log(y_pred + eps))
    return total / len(y)
class ReLu:
    """ReLU activation layer: forward is max(x, 0); backward gates the gradient.

    The boolean mask of positive inputs is cached by forward and reused by
    backward.
    """

    def __init__(self):
        self.mask = None  # boolean array, True where the forward input was > 0

    def forward(self, x):
        """Return x with negative entries replaced by 0; cache the mask."""
        self.mask = x > 0
        return np.where(self.mask, x, 0)

    def backward(self, dout):
        """Propagate the upstream gradient only where the input was positive.

        Fix: the previous version returned the bare 0/1 mask, discarding
        `dout` entirely; the ReLU gradient is dout * 1[x > 0].
        """
        return np.where(self.mask, dout, 0)
class Sigmoid:
    """Sigmoid activation layer; caches its output for the backward pass."""

    def __init__(self):
        self.out = None  # forward output, reused in backward

    def forward(self, x):
        """Return sigmoid(x) = 1 / (1 + exp(-x)) and cache it."""
        result = 1 / (1 + np.exp(-x))
        self.out = result
        return result

    def backward(self, dout):
        """Chain rule with d(sigmoid)/dx = sigmoid * (1 - sigmoid)."""
        return dout * self.out * (1 - self.out)
class Affine:
    """Fully-connected layer computing X @ W + b.

    Forward caches the input X; backward fills dW and db (used by the
    optimizer) and returns the gradient with respect to X.
    """

    def __init__(self, 입력수, 출력수):
        # 입력수 = number of input features, 출력수 = number of output units.
        self.W = np.random.randn(입력수, 출력수)  # weights, standard-normal init
        self.b = np.random.randn(출력수)          # biases, standard-normal init
        self.X = None   # cached forward input, needed to compute dW
        self.dW = None  # gradient of the loss w.r.t. W
        self.db = None  # gradient of the loss w.r.t. b

    def forward(self, X):
        """Affine transform of the batch X (one sample per row)."""
        self.X = X
        return X @ self.W + self.b

    def backward(self, dout):
        """Store parameter gradients and return the input gradient."""
        self.dW = self.X.T @ dout
        self.db = dout.sum(axis=0)  # bias gradient sums over the batch
        return dout @ self.W.T
class SoftmaxCrossEntropy:
    """Combined softmax + cross-entropy output (loss) layer.

    Forward caches the one-hot labels and the softmax probabilities;
    backward uses the well-known simplification of the combined gradient,
    (p - y) / batch_size.
    """

    def __init__(self):
        self.Y = None       # true labels (one-hot), cached by forward
        self.Y_pred = None  # softmax probabilities, cached by forward

    def forward(self, X, Y):
        """Return the batch-averaged cross-entropy of softmax(X) against Y."""
        self.Y = Y
        self.Y_pred = softmax_batch(X)
        return cross_entropy_error_batch(self.Y_pred, self.Y)

    def backward(self, dout=1):
        """Gradient of the loss w.r.t. the pre-softmax scores X."""
        n = len(self.Y)
        return (self.Y_pred - self.Y) / n
class FeedForwadNet:
    """Sequential network trained with mini-batch SGD and backpropagation.

    The LAST layer added is treated as the loss layer (e.g.
    SoftmaxCrossEntropy): predict() runs every layer except it, and
    compute_loss() feeds predict()'s output into it together with the labels.
    """

    def __init__(self):
        self.layers = []  # layers in forward order; loss layer last

    def add(self, layer):
        """Append a layer. Add layers in forward order, the loss layer last."""
        self.layers.append(layer)

    def predict(self, X):
        """Forward pass through every layer except the final loss layer."""
        out = X
        for layer in self.layers[:-1]:
            out = layer.forward(out)
        return out

    def compute_loss(self, X, Y):
        """Forward pass plus the loss layer's value for labels Y."""
        scores = self.predict(X)
        return self.layers[-1].forward(scores, Y)

    def fit(self, X, y, 배치크기, 학습횟수, 학습률):
        """Train with mini-batch SGD and return the per-iteration loss history.

        배치크기 = batch size, 학습횟수 = number of iterations,
        학습률 = learning rate.
        """
        loss_history = []
        for step in range(학습횟수):
            # 1. draw a random mini-batch (sampled with replacement)
            n_samples = len(X)
            batch_idx = np.random.choice(n_samples, 배치크기)
            X_batch = X[batch_idx]
            y_batch = y[batch_idx]
            # 2. gradients: forward pass fills the layer caches, then backprop
            self.compute_loss(X_batch, y_batch)
            dout = 1
            for layer in reversed(self.layers):
                dout = layer.backward(dout)
            # 3. SGD update on every parameterized (Affine) layer
            for layer in self.layers:
                if isinstance(layer, Affine):
                    layer.W -= layer.dW * 학습률
                    layer.b -= layer.db * 학습률
            # record and report the post-update loss on the same batch
            loss = self.compute_loss(X_batch, y_batch)
            loss_history.append(loss)
            print('[학습 {}] Loss: {}'.format(step+1, loss))
        return loss_history