Skip to content

Commit e5ffca8

Browse files
author
xuming06
committed
add keras demo. xuming 20180223
1 parent b8ff0b1 commit e5ffca8

7 files changed

Lines changed: 226 additions & 0 deletions
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: Multilayer Perceptron for multi-class softmax classification

import keras
import numpy as np
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import SGD

# Dummy data: 20-dim feature vectors, labels one-hot encoded over 10 classes.
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)

# Two hidden ReLU layers with dropout, softmax output over the 10 classes.
model = Sequential([
    Dense(64, activation='relu', input_dim=20),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax'),
])

# Plain SGD with Nesterov momentum and a small learning-rate decay.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

model.fit(x_train, y_train, epochs=20, batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: MLP for binary classification on random data
import numpy as np
from keras.layers import Dense, Dropout
from keras.models import Sequential

# Dummy data: 20-dim feature vectors with a 0/1 label.
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

# Two hidden ReLU layers with dropout, sigmoid output for the binary label.
model = Sequential([
    Dense(64, activation='relu', input_dim=20),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),
])

model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=20, batch_size=128)

score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

07keras/04vgg_conv.py

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: VGG-like convnet trained on random image data
import numpy as np
import keras
from keras.layers import Conv2D, MaxPool2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential

# Dummy data: 100x100 RGB images, labels one-hot encoded over 10 classes.
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

# Two conv blocks (conv-conv-pool-dropout) followed by a dense classifier.
model = Sequential([
    # input is 100x100 images with 3 channels
    Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),

    Conv2D(64, (3, 3), activation='relu'),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),

    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax'),
])

sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=32)

score = model.evaluate(x_test, y_test, batch_size=32)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

# rmsprop loss 2.28, acc 0.13
# sgd loss 2.26, acc 0.18

07keras/05lstm_classification.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: LSTM binary sequence classification, with HDF5 save/load round trip
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM

# Generate dummy data.
# FIX: the Embedding layer looks up integer token ids in [0, vocab_size), so
# the inputs must be integer index sequences. The original fed
# np.random.random(...) floats, which truncate to index 0 and leave the
# embedding effectively unused.
vocab_size = 128
x_train = np.random.randint(vocab_size, size=(1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.randint(vocab_size, size=(100, 20))
y_test = np.random.randint(2, size=(100, 1))

# Embed token ids, encode the sequence with an LSTM, classify with sigmoid.
model = Sequential()
model.add(Embedding(vocab_size, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=128, epochs=2)
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

# from keras.utils import plot_model
# plot_model(model, to_file='lstm_model.png')

# Round-trip the full model (architecture + weights) through HDF5.
model.save('lstm_model.h5')
del model
# load model by file
model = keras.models.load_model('lstm_model.h5')
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

# Evaluate the restored model on a fresh batch of dummy data.
x_test = np.random.randint(vocab_size, size=(200, 20))
y_test = np.random.randint(2, size=(200, 1))
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])

07keras/06sequential.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: Functional-API classifier, architecture serialized to JSON and rebuilt
import pickle

import keras
import numpy as np

# A small feed-forward classifier built with the functional API.
inputs = keras.layers.Input(shape=(784,))
x = keras.layers.Dense(64, activation='relu')(inputs)
x = keras.layers.Dense(4, activation='relu')(x)
preds = keras.layers.Dense(10, activation='softmax')(x)

model = keras.models.Model(inputs=inputs, outputs=preds)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# Generate dummy data
data = np.random.random((1000, 784))
labels = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
model.fit(data, labels, epochs=10, batch_size=64)
# loss: 2.2957 - acc: 0.1200

# model.save('my_seq_model.h5')
# del model
# to_json serializes the architecture only (no weights).
json_string = model.to_json()
print(json_string)
del model

with open('seq.pkl', 'wb') as f:
    pickle.dump(json_string, f)

x_test = np.random.random((1000, 784))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
# load model by file
# model = keras.models.load_model('my_seq_model.h5')
with open('seq.pkl', 'rb') as f:
    j = pickle.load(f)

# model_from_json rebuilds the architecture with fresh, untrained weights,
# which is why the evaluation below scores near chance level.
model = keras.models.model_from_json(j)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
score = model.evaluate(x_test, y_test, batch_size=128)
print('total loss on test set:', score[0])
print('accuracy of test set:', score[1])
# loss: 2.2999 - acc: 0.1140

07keras/07shared_lstm.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: Two inputs encoded by one shared LSTM, merged for binary output
import keras
import numpy as np

# Two tweet inputs: sequences of 280 steps, each step a 256-dim vector.
input_a = keras.layers.Input(shape=(280, 256))
input_b = keras.layers.Input(shape=(280, 256))

# A single LSTM instance is applied to both inputs, so the two branches
# share one set of weights.
encoder = keras.layers.LSTM(64)
vec_a = encoder(input_a)
vec_b = encoder(input_b)

# concatenate the two vectors
merged = keras.layers.concatenate([vec_a, vec_b], axis=-1)
output = keras.layers.Dense(1, activation='sigmoid')(merged)

model = keras.models.Model(inputs=[input_a, input_b], outputs=output)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# Dummy paired data with a single 0/1 label per pair.
data_a = np.random.random((1000, 280, 256))
data_b = np.random.random((1000, 280, 256))
labels = np.random.randint(2, size=(1000, 1))
model.fit([data_a, data_b], labels, epochs=10, batch_size=64)
# loss: 0.5961 - acc: 0.7230

07keras/babi_rnn.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
# -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief: Download the bAbI tasks dataset via keras' cached downloader
import numpy as np

from keras.utils.data_utils import get_file

try:
    # get_file downloads once and caches under ~/.keras/datasets/.
    path = get_file('babi-tasks-v1-2.tar.gz',
                    origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except Exception:
    # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
    # not intercepted just to print the manual-download hint; the original
    # exception is still re-raised below.
    print('Error downloading dataset, please download it manually:\n'
          '$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
          '$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
    raise

0 commit comments

Comments
 (0)