# network.py — minimal feed-forward neural network (toy XOR-style example)
# implemented with NumPy. Layer parameters are kept in module-level tables
# keyed by layer index; Model wires layers together and runs train/eval.
import numpy as np
from random import random
weights_table = {}
bias_table = {}
grad_weights_table = {}
grad_bias_table = {}
class Layer(object):
    """One fully connected layer in the network.

    Weights (shape ``(width, input_width)``) and biases (shape ``(width, 1)``)
    are stored in the module-level tables, keyed by ``self.index``.
    """

    def __init__(self, input_width, width, activation=None):
        self.width = width
        self.input_width = input_width
        # None, "sigmoid" or "relu"; anything else behaves as linear.
        self.activation = activation
        # Weighted input (pre-activation), set by forward().
        self.h = None
        # Output after the activation function, set by forward().
        self.output = None
        self.grad_input = None
        # Key into the module-level parameter/gradient tables; assigned by the model.
        self.index = None
        self.is_last_layer = False
        # Upstream layer supplying this layer's input; assigned by the model.
        self.prev_layer = None

    @property
    def weights(self):
        # (width, input_width) matrix, or None before initialize() is called.
        return weights_table.get(self.index)

    @property
    def bias(self):
        # (width, 1) column vector, or None before initialize() is called.
        return bias_table.get(self.index)

    def initialize(self):
        """Allocate random weights and biases for this layer."""
        weights_table[self.index] = np.random.rand(self.width, self.input_width)
        bias_table[self.index] = np.random.rand(self.width, 1)

    def debug_print(self):
        print("Weights", self.weights)
        print("output", self.output)

    def forward(self, input_data=None):
        """Compute this layer's output.

        input_data: array of shape (n_samples, input_width). Defaults to the
        previous layer's output (the original code called forward() with no
        argument from Model.evaluate).
        """
        if input_data is None:
            input_data = self.prev_layer.output
        # weights is (width, input_width), so x @ W.T -> (n, width).
        # bias is (width, 1); its transpose broadcasts over the batch.
        # (Original multiplied x @ W directly — a dimension mismatch.)
        self.h = np.dot(input_data, self.weights.T) + self.bias.T
        func, _ = self.get_activation()
        self.output = func(self.h) if func else self.h
        return self.output

    @staticmethod
    def activation_func1(x):
        """Sigmoid activation."""
        return 1. / (1 + np.exp(-x))

    @staticmethod
    def activation_dev1(output):
        """Sigmoid derivative, expressed as a function of the sigmoid output."""
        return output * (1 - output)

    @staticmethod
    def activation_func2(x):
        """ReLU activation (elementwise; original used max(), which fails on arrays)."""
        return np.maximum(x, 0)

    @staticmethod
    def activation_dev2(output):
        """ReLU derivative from the ReLU output (np.sgn does not exist in NumPy)."""
        return (output > 0).astype(float)

    def get_activation(self):
        """Return (activation, derivative) for this layer, or (None, None) for linear."""
        if self.activation == "sigmoid":
            return (self.activation_func1, self.activation_dev1)
        elif self.activation == "relu":
            return (self.activation_func2, self.activation_dev2)
        else:
            return (None, None)

    def calculate_gradient(self, prev_grad):
        """Backpropagate through this layer.

        prev_grad: dL/d(output), shape (n_samples, width).
        Stores the averaged weight/bias gradients in the module tables and
        returns dL/d(input), shape (n_samples, input_width).
        """
        _, dev = self.get_activation()
        shared_grad = prev_grad
        # Apply the activation derivative when an activation exists
        # (original inverted this test and read the misspelled self.ouput).
        if dev:
            shared_grad = shared_grad * dev(self.output)
        nr_of_samples = prev_grad.shape[0]
        # dL/dW averaged over the batch: (width, n) @ (n, input_width).
        grad_weights_table[self.index] = (
            np.dot(shared_grad.T, self.prev_layer.output) / nr_of_samples
        )
        # dL/db averaged over the batch -> (width, 1) column vector.
        grad_bias_table[self.index] = (
            np.sum(shared_grad, axis=0, keepdims=True).T / nr_of_samples
        )
        # Gradient w.r.t. this layer's input: (n, width) @ (width, input_width).
        return np.dot(shared_grad, self.weights)

    def update(self, learning_rate=0.1):
        """Gradient-descent step using the gradients stored by calculate_gradient()."""
        # The weights/bias properties are read-only views into the tables,
        # so the update is applied to the tables directly.
        weights_table[self.index] = (
            weights_table[self.index] - learning_rate * grad_weights_table[self.index]
        )
        bias_table[self.index] = (
            bias_table[self.index] - learning_rate * grad_bias_table[self.index]
        )
class Model(object):
    """A tiny fully connected network: 2 inputs -> 2 hidden units -> 1 output."""

    def __init__(self, learning_rate=0.1):
        # update() reads self.learning_rate; the original never set it.
        self.learning_rate = learning_rate
        # Pseudo-layer that only carries the raw input as its .output.
        self.input_layer = Layer(2, 1)
        self.input_layer.index = 0
        self._layers = []
        # Hidden layer: 2 inputs -> 2 units. Sigmoid gives the nonlinearity
        # an XOR-style target needs (the original had no activation at all).
        self._layers.append(Layer(2, 2, activation="sigmoid"))
        self._layers[-1].prev_layer = self.input_layer
        # Output layer: 2 inputs -> 1 unit (original swapped these as Layer(1, 2)).
        self._layers.append(Layer(2, 1))
        self._layers[-1].prev_layer = self._layers[-2]
        # Distinct table keys per layer; the original left every index None,
        # so all layers collided on the same table entry.
        for i, layer in enumerate(self._layers, start=1):
            layer.index = i
        self.initialize()

    def train(self, epochs=1):
        """Run gradient-descent steps on the fixed XOR training set."""
        X = np.array([[1, 1], [0, 0], [0, 1], [1, 0]])
        Y = np.array([[0], [0], [1], [1]])
        for _ in range(epochs):
            output = self.evaluate(X)
            self.backprop(self.grad_output(output, Y))
            self.update()

    def grad_output(self, output, y):
        """dL/d(output) for mean-squared error.

        The original returned y - output, which is the *negative* gradient and
        would make the update step ascend the loss.
        """
        return output - y

    def evaluate(self, input_data):
        """Forward-pass input_data (shape (n_samples, 2)) through every layer."""
        self.input_layer.output = np.asarray(input_data)
        for layer in self._layers:
            # Pass the input explicitly (the original called forward() with no
            # argument although forward requires one).
            layer.forward(layer.prev_layer.output)
        return self._layers[-1].output

    def backprop(self, prev_grad):
        """Propagate the output gradient backwards through all layers."""
        for layer in reversed(self._layers):
            prev_grad = layer.calculate_gradient(prev_grad)

    def update(self):
        """Apply one gradient-descent step to every layer."""
        for layer in self._layers:
            layer.update(self.learning_rate)

    def initialize(self):
        """Initialize weights and biases of all trainable layers."""
        for layer in self._layers:
            layer.initialize()

    def report(self):
        pass
if __name__ == "__main__":
    # learning rate 1 — aggressive but fine for this toy problem.
    m = Model(1)
    # Make sure parameters exist before training (the original trained an
    # uninitialized network).
    m.initialize()
    m.train()
    # evaluate expects a 2-D batch: one sample with two features.
    out = m.evaluate(np.array([[1, 1]]))
    print(out)