Skip to content

Commit ea78cac

Browse files
author
xuming06
committed
add pytorch demo.
1 parent ae945b7 commit ea78cac

34 files changed

Lines changed: 160809 additions & 18 deletions

02numpy/01.array.py

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
from __future__ import print_function
from __future__ import unicode_literals

import tracemalloc

# Trace memory allocations, keeping up to 10 frames per allocation traceback.
tracemalloc.start(10)

# Baseline snapshot; a second snapshot at the end of the script is compared
# against this one to report the biggest allocators.
time1 = tracemalloc.take_snapshot()

# Import numpy.
# Many other third-party scientific-computing libraries are built on Numpy.
# An important feature of Numpy is its array computation.
from numpy import *

# Always import the Numpy package before use; common import styles:
# import numpy
# import numpy as np
b = sin(a)
print(b)

# Plotting
from matplotlib import pyplot as plt

plt.plot(a, b)
# plt.show()

# Selecting elements from an array.
# To pick all non-negative entries of b, first build a boolean mask from b:
mask = b >= 0
print(mask)

# Second snapshot: diff against the baseline taken at the top of the script.
time2 = tracemalloc.take_snapshot()

# Top allocation deltas grouped by source line.
stats = time2.compare_to(time1, 'lineno')
print('*' * 32)
for stat in stats[:3]:
    print(stat)

# Same deltas, grouped by full traceback for more context.
stats = time2.compare_to(time1, 'traceback')
print('*' * 32)
for stat in stats[:3]:
    print(stat.traceback.format())

17tensorflow/01.seq2seq_demo.py

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
# -*- coding: utf-8 -*-
2+
"""
3+
@author:XuMing([email protected])
4+
@description:
5+
"""
6+
7+
from seq2seq import SimpleSeq2Seq, Seq2Seq, AttentionSeq2Seq
8+
import numpy as np
9+
from keras.utils.test_utils import keras_test
10+
# Toy dimensions for the seq2seq smoke tests below.
input_length = 5   # timesteps in each input sequence
input_dim = 3      # feature size per input timestep

output_length = 3  # timesteps in each output sequence
output_dim = 4     # feature size per output timestep

samples = 100      # number of random training examples
hidden_dim = 24    # hidden state size of the recurrent layers
@keras_test
def test_SimpleSeq2Seq():
    """Smoke-test SimpleSeq2Seq at depth 1 and depth 2 on random data.

    Builds both models, then compiles and fits each for a single epoch
    with MSE loss and SGD.
    """
    x = np.random.random((samples, input_length, input_dim))
    y = np.random.random((samples, output_length, output_dim))

    models = []
    print(x)
    print(y)
    models += [SimpleSeq2Seq(output_dim=output_dim, hidden_dim=hidden_dim, output_length=output_length,
                             input_shape=(input_length, input_dim))]
    models += [SimpleSeq2Seq(output_dim=output_dim, hidden_dim=hidden_dim, output_length=output_length,
                             input_shape=(input_length, input_dim), depth=2)]

    for model in models:
        model.compile(loss='mse', optimizer='sgd')
        # Fix: Keras 2 renamed ``nb_epoch`` to ``epochs``; the sibling tests
        # in this file already use ``epochs=1``.
        model.fit(x, y, epochs=1)
@keras_test
def test_Seq2Seq():
    """Smoke-test Seq2Seq across peek/depth variants, plus teacher forcing.

    Each model is compiled with MSE loss and SGD and fit for one epoch on
    random data.
    """
    x = np.random.random((samples, input_length, input_dim))
    y = np.random.random((samples, output_length, output_dim))

    # Arguments shared by every variant.
    common = dict(output_dim=output_dim, hidden_dim=hidden_dim,
                  output_length=output_length,
                  input_shape=(input_length, input_dim))
    variants = [{}, {'peek': True}, {'depth': 2}, {'peek': True, 'depth': 2}]

    # Build all models first, then train each — same order as a manual list.
    models = [Seq2Seq(**common, **extra) for extra in variants]
    for model in models:
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1)

    # Teacher forcing feeds the ground-truth targets as a second input.
    model = Seq2Seq(peek=True, depth=2, teacher_force=True, **common)
    model.compile(loss='mse', optimizer='sgd')
    model.fit([x, y], y, epochs=1)
@keras_test
def test_AttentionSeq2Seq():
    """Smoke-test AttentionSeq2Seq at depths 1, 2 and 3 on random data.

    Each model is compiled with MSE loss and SGD and fit for one epoch.
    """
    x = np.random.random((samples, input_length, input_dim))
    y = np.random.random((samples, output_length, output_dim))

    base = dict(output_dim=output_dim, hidden_dim=hidden_dim,
                output_length=output_length,
                input_shape=(input_length, input_dim))
    models = [AttentionSeq2Seq(**base)]          # default depth
    models.append(AttentionSeq2Seq(depth=2, **base))
    models.append(AttentionSeq2Seq(depth=3, **base))

    for model in models:
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1)
# Entry points left disabled; uncomment to run the smoke tests.
# test_SimpleSeq2Seq()
# test_Seq2Seq()
# test_AttentionSeq2Seq()

# NOTE(review): AttentionSeq2Seq is already imported at the top of the file
# from ``seq2seq``; this re-import from ``seq2seq.models`` is redundant but
# kept to preserve behavior.
from seq2seq.models import AttentionSeq2Seq

# Standalone demo model with dimensions unrelated to the test constants above.
model = AttentionSeq2Seq(input_dim=5, input_length=7, hidden_dim=10,
                         output_length=8, output_dim=20, depth=4)
model.compile(loss='mse', optimizer='rmsprop')

20pytorch/03.neural_network.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def num_flat_features(self, x):
5151

5252
# loss function
# Compare the network's output against a dummy regression target with MSE.
output = network(input)
# Random target; presumably 10 values to match the network's output size —
# TODO(review): confirm against the network definition above.
# NOTE(review): torch.autograd.Variable is deprecated in modern PyTorch;
# plain tensors carry autograd directly.
target = Variable(torch.randn(1, 10))
target = target.view(1, -1)  # reshape to (1, 10): one batch row
criterion = nn.MSELoss()
loss = criterion(output, target)

0 commit comments

Comments (0)