Skip to content

Commit e5c3f1b

Browse files
author
Julian Kates-Harbeck
committed
conf update
2 parents dea576e + 374e069 commit e5c3f1b

File tree

5 files changed

+65
-38
lines changed

5 files changed

+65
-38
lines changed

data/signals.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,8 +137,11 @@ def fetch_nstx_data(signal_path,shot_num,c):
137137

138138
profile_num_channels = 64
139139
#ZIPFIT comes from actual measurements
140-
etemp_profile = ProfileSignal("Electron temperature profile",["ppf/hrts/te","ZIPFIT01/PROFILES.ETEMPFIT"],[jet,d3d],mapping_paths=["ppf/hrts/rho",None],causal_shifts=[0,10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.05,0.02])
141-
edens_profile = ProfileSignal("Electron density profile",["ppf/hrts/ne","ZIPFIT01/PROFILES.EDENSFIT"],[jet,d3d],mapping_paths=["ppf/hrts/rho",None],causal_shifts=[0,10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.05,0.02])
140+
#etemp_profile = ProfileSignal("Electron temperature profile",["ppf/hrts/te","ZIPFIT01/PROFILES.ETEMPFIT"],[jet,d3d],mapping_paths=["ppf/hrts/rho",None],causal_shifts=[0,10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.05,0.02])
141+
#edens_profile = ProfileSignal("Electron density profile",["ppf/hrts/ne","ZIPFIT01/PROFILES.EDENSFIT"],[jet,d3d],mapping_paths=["ppf/hrts/rho",None],causal_shifts=[0,10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.05,0.02])
142+
143+
etemp_profile = ProfileSignal("Electron temperature profile",["ZIPFIT01/PROFILES.ETEMPFIT"],[d3d],mapping_paths=[None],causal_shifts=[10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.02])
144+
edens_profile = ProfileSignal("Electron density profile",["ZIPFIT01/PROFILES.EDENSFIT"],[d3d],mapping_paths=[None],causal_shifts=[10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.02])
142145
itemp_profile = ProfileSignal("Ion temperature profile",["ZIPFIT01/PROFILES.ITEMPFIT"],[d3d],causal_shifts=[10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.02])
143146
zdens_profile = ProfileSignal("Impurity density profile",["ZIPFIT01/PROFILES.ZDENSFIT"],[d3d],causal_shifts=[10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.02])
144147
trot_profile = ProfileSignal("Rotation profile",["ZIPFIT01/PROFILES.TROTFIT"],[d3d],causal_shifts=[10],mapping_range=(0,1),num_channels=profile_num_channels,data_avail_tolerances=[0.02])

examples/conf.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,11 +48,14 @@ data:
4848
window_size: 10
4949
#TODO optimize
5050
normalizer: 'var'
51+
norm_stat_range: 100.0
5152
equalize_classes: False
5253
# shallow_sample_prob: 0.01 #the fraction of samples with which to train the shallow model
5354
floatx: 'float32'
5455

5556
model:
57+
use_bidirectional: false
58+
use_batch_norm: false
5659
shallow: False
5760
shallow_model:
5861
num_samples: 1000000 #1000000 #the number of samples to use for training

plasma/models/builder.py

Lines changed: 50 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,8 @@
22
import keras
33
from keras.models import Sequential, Model
44
from keras.layers import Input
5-
from keras.layers.core import Dense, Activation, Dropout, Lambda, Reshape, Flatten, Permute
6-
from keras.layers.recurrent import LSTM, SimpleRNN
5+
from keras.layers.core import Dense, Activation, Dropout, Lambda, Reshape, Flatten, Permute, RepeatVector
6+
from keras.layers import LSTM, SimpleRNN, Bidirectional, BatchNormalization
77
from keras.layers.convolutional import Convolution1D
88
from keras.layers.pooling import MaxPooling1D
99
from keras.utils.data_utils import get_file
@@ -12,7 +12,6 @@
1212
from keras.callbacks import Callback
1313
from keras.regularizers import l1,l2,l1_l2
1414

15-
1615
import keras.backend as K
1716

1817
import dill
@@ -73,10 +72,12 @@ def get_0D_1D_indices(self):
7372
def build_model(self,predict,custom_batch_size=None):
7473
conf = self.conf
7574
model_conf = conf['model']
75+
use_bidirectional = model_conf['use_bidirectional']
7676
rnn_size = model_conf['rnn_size']
7777
rnn_type = model_conf['rnn_type']
7878
regularization = model_conf['regularization']
7979
dense_regularization = model_conf['dense_regularization']
80+
use_batch_norm = model_conf['use_batch_norm']
8081

8182
dropout_prob = model_conf['dropout_prob']
8283
length = model_conf['length']
@@ -118,6 +119,7 @@ def build_model(self,predict,custom_batch_size=None):
118119
batch_shape_non_temporal=(batch_size,num_signals)
119120

120121
indices_0d,indices_1d,num_0D,num_1D = self.get_0D_1D_indices()
122+
121123
def slicer(x,indices):
122124
return x[:,indices]
123125

@@ -130,23 +132,47 @@ def slicer_output_shape(input_shape,indices):
130132
pre_rnn_input = Input(shape=(num_signals,))
131133

132134
if num_1D > 0:
133-
#pre_rnn_0D = Lambda(lambda x: slicer(x,indices_0d),lambda s: slicer_output_shape(s,indices_0d))(pre_rnn_input)
134-
#pre_rnn_1D = Lambda(lambda x: slicer(x,indices_1d),lambda s: slicer_output_shape(s,indices_1d))(pre_rnn_input)
135-
#idx0D_tensor = K.variable(indices_0d)
136-
#idx1D_tensor = K.variable(indices_1d)
137135
pre_rnn_1D = Lambda(lambda x: x[:,len(indices_0d):],output_shape=(len(indices_1d),))(pre_rnn_input)
138136
pre_rnn_0D = Lambda(lambda x: x[:,:len(indices_0d)],output_shape=(len(indices_0d),))(pre_rnn_input)# slicer(x,indices_0d),lambda s: slicer_output_shape(s,indices_0d))(pre_rnn_input)
139137
pre_rnn_1D = Reshape((num_1D,len(indices_1d)//num_1D)) (pre_rnn_1D)
140138
pre_rnn_1D = Permute((2,1)) (pre_rnn_1D)
141139

142140
for i in range(model_conf['num_conv_layers']):
143141
div_fac = 2**i
144-
pre_rnn_1D = Convolution1D(num_conv_filters//div_fac,size_conv_filters,padding='valid',activation='relu') (pre_rnn_1D)
145-
pre_rnn_1D = Convolution1D(num_conv_filters//div_fac,1,padding='valid',activation='relu') (pre_rnn_1D)
142+
'''The first conv layer learns `num_conv_filters//div_fac` filters (aka kernels),
143+
each of size `(size_conv_filters, num_1D)`. Its output will have shape
144+
(None, len(indices_1d)//num_1D - size_conv_filters + 1, num_conv_filters//div_fac),
145+
i.e., for each position in the input spatial series (direction along radius),
146+
the activation of each filter at that position.'''
147+
148+
'''For i=1 first conv layer would get:
149+
(None, (len(indices_1d)//num_1D - size_conv_filters + 1)/pool_size-size_conv_filters+1,num_conv_filters//div_fac)'''
150+
pre_rnn_1D = Convolution1D(num_conv_filters//div_fac,size_conv_filters,padding='valid') (pre_rnn_1D)
151+
if use_batch_norm: pre_rnn_1D = BatchNormalization()(pre_rnn_1D)
152+
pre_rnn_1D = Activation('relu')(pre_rnn_1D)
153+
154+
'''The output of the second conv layer will have shape
155+
(None, len(indices_1d)//num_1D - size_conv_filters + 1, num_conv_filters//div_fac),
156+
i.e., for each position in the input spatial series (direction along radius),
157+
the activation of each filter at that position.
158+
159+
for i=1 second layer would output
160+
(None, (len(indices_1d)//num_1D - size_conv_filters + 1)/pool_size-size_conv_filters+1,num_conv_filters//div_fac)'''
161+
pre_rnn_1D = Convolution1D(num_conv_filters//div_fac,1,padding='valid') (pre_rnn_1D)
162+
if use_batch_norm: pre_rnn_1D = BatchNormalization()(pre_rnn_1D)
163+
pre_rnn_1D = Activation('relu')(pre_rnn_1D)
164+
'''Outputs (None, (len(indices_1d)//num_1D - size_conv_filters + 1)/pool_size, num_conv_filters//div_fac)
165+
166+
for i=1 pooling layer would output:
167+
(None,((len(indices_1d)//num_1D- size_conv_filters + 1)/pool_size-size_conv_filters+1)/pool_size,num_conv_filters//div_fac)'''
146168
pre_rnn_1D = MaxPooling1D(pool_size) (pre_rnn_1D)
147169
pre_rnn_1D = Flatten() (pre_rnn_1D)
148-
pre_rnn_1D = Dense(dense_size,activation='relu',kernel_regularizer=l2(dense_regularization),bias_regularizer=l2(dense_regularization),activity_regularizer=l2(dense_regularization)) (pre_rnn_1D)
149-
pre_rnn_1D = Dense(dense_size//4,activation='relu',kernel_regularizer=l2(dense_regularization),bias_regularizer=l2(dense_regularization),activity_regularizer=l2(dense_regularization)) (pre_rnn_1D)
170+
pre_rnn_1D = Dense(dense_size,kernel_regularizer=l2(dense_regularization),bias_regularizer=l2(dense_regularization),activity_regularizer=l2(dense_regularization)) (pre_rnn_1D)
171+
if use_batch_norm: pre_rnn_1D = BatchNormalization()(pre_rnn_1D)
172+
pre_rnn_1D = Activation('relu')(pre_rnn_1D)
173+
pre_rnn_1D = Dense(dense_size//4,kernel_regularizer=l2(dense_regularization),bias_regularizer=l2(dense_regularization),activity_regularizer=l2(dense_regularization)) (pre_rnn_1D)
174+
if use_batch_norm: pre_rnn_1D = BatchNormalization()(pre_rnn_1D)
175+
pre_rnn_1D = Activation('relu')(pre_rnn_1D)
150176
pre_rnn = Concatenate() ([pre_rnn_0D,pre_rnn_1D])
151177
else:
152178
pre_rnn = pre_rnn_input
@@ -157,33 +183,22 @@ def slicer_output_shape(input_shape,indices):
157183
pre_rnn = Dense(dense_size//4,activation='relu',kernel_regularizer=l2(dense_regularization),bias_regularizer=l2(dense_regularization),activity_regularizer=l2(dense_regularization)) (pre_rnn)
158184

159185
pre_rnn_model = Model(inputs = pre_rnn_input,outputs=pre_rnn)
186+
pre_rnn_model.summary()
160187
x_input = Input(batch_shape = batch_input_shape)
161188
x_in = TimeDistributed(pre_rnn_model) (x_input)
162189

163-
# x_input = Input(batch_shape=batch_input_shape)
164-
# if num_1D > 0:
165-
# x_0D = Lambda(lambda x: slicer(x,indices_0d),lambda s: slicer_output_shape(s,indices_0d)) (x_input)
166-
# x_1D = Lambda(lambda x: slicer(x,indices_1d),lambda s: slicer_output_shape(s,indices_1d)) (x_input)
167-
#
168-
# x_1D = TimeDistributed(Reshape((num_1D,len(indices_1d)/num_1D))) (x_1D)
169-
# for i in range(model_conf['num_conv_layers']):
170-
# x_1D = TimeDistributed(Conv1D(num_conv_filters,size_conv_filters,activation='relu')) (x_1D)
171-
# x_1D = TimeDistributed(MaxPooling1D(pool_size)) (x_1D)
172-
# x_1D = TimeDistributed(Flatten()) (x_1D)
173-
# x_in = TimeDistributed(Concatenate) ([x_0D,x_1D])
174-
#
175-
# else:
176-
# x_in = x_input
177-
#x_in = TimeDistributed(Dense(100,activation='tanh')) (x_in)
178-
#x_in = TimeDistributed(Dense(30,activation='tanh')) (x_in)
179-
#x_in = TimeDistributed(Dense(2*(num_0D+num_1D)),activation='relu') (x_in)
180-
# x = TimeDistributed(Dense(2*(num_0D+num_1D)))
181-
# model.add(TimeDistributed(Dense(num_density_channels,bias=True),batch_input_shape=batch_input_shape))
182-
for _ in range(model_conf['rnn_layers']):
183-
x_in = rnn_model(rnn_size, return_sequences=return_sequences,#batch_input_shape=batch_input_shape,
184-
stateful=stateful,kernel_regularizer=l2(regularization),recurrent_regularizer=l2(regularization),
185-
bias_regularizer=l2(regularization),dropout=dropout_prob,recurrent_dropout=dropout_prob) (x_in)
186-
x_in = Dropout(dropout_prob) (x_in)
190+
if use_bidirectional:
191+
for _ in range(model_conf['rnn_layers']):
192+
x_in = Bidirectional(rnn_model(rnn_size, return_sequences=return_sequences,
193+
stateful=stateful,kernel_regularizer=l2(regularization),recurrent_regularizer=l2(regularization),
194+
bias_regularizer=l2(regularization),dropout=dropout_prob,recurrent_dropout=dropout_prob)) (x_in)
195+
x_in = Dropout(dropout_prob) (x_in)
196+
else:
197+
for _ in range(model_conf['rnn_layers']):
198+
x_in = rnn_model(rnn_size, return_sequences=return_sequences,#batch_input_shape=batch_input_shape,
199+
stateful=stateful,kernel_regularizer=l2(regularization),recurrent_regularizer=l2(regularization),
200+
bias_regularizer=l2(regularization),dropout=dropout_prob,recurrent_dropout=dropout_prob) (x_in)
201+
x_in = Dropout(dropout_prob) (x_in)
187202
if return_sequences:
188203
#x_out = TimeDistributed(Dense(100,activation='tanh')) (x_in)
189204
x_out = TimeDistributed(Dense(1,activation=output_activation)) (x_in)

plasma/preprocessor/normalize.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ def __init__(self,conf):
4343
self.remapper = conf['data']['target'].remapper
4444
self.machines = set()
4545
self.inference_mode = False
46+
self.bound = self.conf['data']['norm_stat_range']
4647

4748
@abc.abstractmethod
4849
def __str__(self):
@@ -192,6 +193,7 @@ def __init__(self,conf):
192193
Normalizer.__init__(self,conf)
193194
self.means = dict()
194195
self.stds = dict()
196+
self.bound = self.conf['data']['norm_stat_range']
195197

196198
def __str__(self):
197199
s = ''
@@ -246,6 +248,7 @@ def apply(self,shot):
246248
if stds_curr == 0.0:
247249
stds_curr = 1.0
248250
shot.signals_dict[sig] = (shot.signals_dict[sig] - means[i])/stds_curr
251+
shot.signals_dict[sig] = np.clip(shot.signals_dict[sig],-self.bound,self.bound)
249252

250253
shot.ttd = self.remapper(shot.ttd,self.conf['data']['T_warning'])
251254
self.cut_end_of_shot(shot)
@@ -288,6 +291,7 @@ def apply(self,shot):
288291
if stds_curr == 0.0:
289292
stds_curr = 1.0
290293
shot.signals_dict[sig] = (shot.signals_dict[sig])/stds_curr
294+
shot.signals_dict[sig] = np.clip(shot.signals_dict[sig],-self.bound,self.bound)
291295
shot.ttd = self.remapper(shot.ttd,self.conf['data']['T_warning'])
292296
self.cut_end_of_shot(shot)
293297

@@ -312,6 +316,7 @@ def apply(self,shot):
312316
for (i,sig) in enumerate(shot.signals):
313317
if sig.normalize:
314318
shot.signals_dict[sig] = apply_along_axis(lambda m : correlate(m,window,'valid'),axis=0,arr=shot.signals_dict[sig])
319+
shot.signals_dict[sig] = np.clip(shot.signals_dict[sig],-self.bound,self.bound)
315320
shot.ttd = shot.ttd[-shot.signals.shape[0]:]
316321

317322
def __str__(self):
@@ -330,6 +335,7 @@ def __init__(self,conf):
330335
Normalizer.__init__(self,conf)
331336
self.minimums = None
332337
self.maximums = None
338+
self.bound = self.conf['data']['norm_stat_range']
333339

334340

335341
def __str__(self):
@@ -379,6 +385,7 @@ def apply(self,shot):
379385
for (i,sig) in enumerate(shot.signals):
380386
if sig.normalize:
381387
shot.signals_dict[sig] = (shot.signals_dict[sig] - self.minimums[m])/(self.maximums[m] - self.minimums[m])
388+
shot.signals_dict[sig] = np.clip(shot.signals_dict[sig],-self.bound,self.bound)
382389
shot.ttd = self.remapper(shot.ttd,self.conf['data']['T_warning'])
383390
self.cut_end_of_shot(shot)
384391
# self.apply_positivity_mask(shot)

plasma/utils/performance.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -840,7 +840,6 @@ def tradeoff_plot(self,accuracy_range,missed_range,fp_range,early_alarm_range,sa
840840
plt.ylim([0,1])
841841
if save_figure:
842842
plt.savefig(title_str + '_roc.png',bbox_inches='tight',dpi=200)
843-
#np.savez(title_str + '_roc.npz',"P_thresh_range",P_thresh_range,"missed_range",missed_range,"fp_range",fp_range)
844843
print('ROC area ({}) is {}'.format(plot_string,self.roc_from_missed_fp(missed_range,fp_range)))
845844
return P_thresh_range,missed_range,fp_range
846845

0 commit comments

Comments
 (0)