-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathmodels.py
146 lines (136 loc) · 7.94 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization, Input, Conv2D, MaxPooling2D, Flatten,ZeroPadding2D, AveragePooling2D, GlobalMaxPooling2D, SeparableConv2D, DepthwiseConv2D
from tensorflow.keras.regularizers import l1
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from qkeras import QActivation
from qkeras import QDense, QConv2D
from qkeras import quantized_bits
from qkeras import QBatchNormalization
import sys
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_callbacks
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_schedule
from tensorflow_model_optimization.sparsity import keras as sparsity
def float_cnn(name_, Inputs,nclasses,filters,kernel,strides, pooling, neurons, dropout, activation, pruning_params = {}):
    """Build an unquantized CNN classifier as a Keras functional Model.

    Architecture: BatchNorm -> [Conv2D -> (optional pool) -> BatchNorm -> act]
    per entry in `filters`, then Flatten, then [Dense -> BatchNorm -> act] per
    entry in `neurons`, then a Dense(nclasses) + softmax head.

    Args:
        name_: name given to the returned Model.
        Inputs: Keras input tensor (output of `Input(...)`).
        nclasses: number of output classes for the softmax head.
        filters, kernel, strides, pooling, dropout: equal-length sequences,
            one entry per conv block (values may be strings; cast with int/float).
        neurons: sequence of hidden-layer widths for the dense head.
        activation: activation name applied after every BatchNorm.
        pruning_params: accepted for signature parity with the pruned
            variants; not used here.

    Returns:
        A compiled-ready (uncompiled) tf.keras Model.
    """
    print ("Building model: float_cnn")
    n_blocks = len(filters)
    # Every per-block hyperparameter list must line up with `filters`.
    if any(len(seq) != n_blocks for seq in [filters, kernel, strides, pooling, dropout]):
        sys.exit("One value for stride and kernel must be added for each filter! Exiting")
    x = x_in = Inputs
    x = BatchNormalization()(x)
    for idx, (f, k, s, p, d) in enumerate(zip(filters, kernel, strides, pooling, dropout)):
        print(("Adding CONV block with {} filters, kernel_size=({},{}), strides=({},{})").format(f, k, k, s, s))
        x = Conv2D(
            int(f),
            kernel_size=(int(k), int(k)),
            strides=(int(s), int(s)),
            kernel_initializer='lecun_uniform',
            use_bias=False,
            name='conv_%i' % idx,
        )(x)
        # NOTE(review): p == 1 selects average pooling with pool_size (1,1),
        # which is a no-op; p == 2 selects 2x2 max pooling. The dropout value
        # `d` is unpacked but never applied — confirm both are intentional.
        if float(p) == 1:
            x = AveragePooling2D(pool_size=(int(p), int(p)))(x)
        if float(p) == 2:
            x = MaxPooling2D(pool_size=(int(p), int(p)))(x)
        x = BatchNormalization()(x)
        x = Activation(activation, name='conv_act_%i' % idx)(x)
    x = Flatten()(x)
    for idx, width in enumerate(neurons):
        print(("Adding DENSE block with {} neurons").format(width))
        x = Dense(width, kernel_initializer='lecun_uniform', name='dense_%i' % idx, use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation(activation, name='dense_act_%i' % idx)(x)
    x = Dense(nclasses, name='output_dense')(x)
    x_out = Activation('softmax', name='output_softmax')(x)
    return Model(inputs=[x_in], outputs=[x_out], name=name_)
def qkeras_cnn(name_, Inputs,nclasses,filters,kernel,strides, pooling, dropout, activation, pruning_params = {},qb=quantized_bits(6,0,alpha=1)):
    """Build a QKeras quantized CNN classifier.

    Architecture: BatchNorm -> ZeroPadding -> [QConv2D -> (optional max-pool)
    -> BatchNorm -> act] per entry in `filters`, then Flatten -> QDense(128)
    -> Dropout(0.25) -> BatchNorm -> act -> Dense(nclasses, softmax).

    Args:
        name_: name given to the returned Model.
        Inputs: Keras input tensor.
        nclasses: number of softmax output classes.
        filters, kernel, strides, pooling, dropout: equal-length per-block
            hyperparameter sequences (dropout entries are currently unused).
        activation: activation name applied after each BatchNorm.
        pruning_params: accepted for signature parity; not used here.
        qb: QKeras quantizer applied to conv/dense kernels and biases.

    Returns:
        An uncompiled tf.keras Model.
    """
    n_blocks = len(filters)
    # All per-block hyperparameter lists must match `filters` in length.
    if any(len(seq) != n_blocks for seq in [filters, kernel, strides, pooling, dropout]):
        sys.exit("One value for stride and kernel must be added for each filter! Exiting")
    x = x_in = Inputs
    x = BatchNormalization()(x)
    x = ZeroPadding2D(padding=(1, 1), data_format="channels_last")(x)
    for idx, (f, k, s, p, d) in enumerate(zip(filters, kernel, strides, pooling, dropout)):
        print(("Adding layer with {} filters, kernel_size=({},{}), strides=({},{})").format(f, k, k, s, s))
        x = QConv2D(
            int(f),
            kernel_size=(int(k), int(k)),
            strides=(int(s), int(s)),
            kernel_quantizer=qb,
            bias_quantizer=qb,
            kernel_initializer='lecun_uniform',
            kernel_regularizer=l1(0.0001),
            use_bias=False,
            name='conv_%i' % idx,
        )(x)
        # p == 0 disables pooling; otherwise p is the (square) pool size.
        if float(p) != 0:
            x = MaxPooling2D(pool_size=(int(p), int(p)))(x)
        x = BatchNormalization()(x)
        x = Activation(activation, name='conv_act_%i' % idx)(x)
    x = Flatten()(x)
    x = QDense(
        128,
        kernel_quantizer=qb,
        bias_quantizer=qb,
        kernel_initializer='lecun_uniform',
        kernel_regularizer=l1(0.0001),
        name='dense_1',
        use_bias=False,
    )(x)
    x = Dropout(0.25)(x)
    x = BatchNormalization()(x)
    x = Activation(activation, name='dense_act')(x)
    x_out = Dense(nclasses, activation='softmax', name='output')(x)
    return Model(inputs=[x_in], outputs=[x_out], name=name_)
def float_cnn_densePrune(name_, Inputs,nclasses,filters,kernel,strides, pooling, dropout, activation, pruning_params = {}):
    """Build a float CNN whose 128-unit dense layer is magnitude-pruned.

    Same layout as `qkeras_cnn` but with plain Conv2D/Dense layers; only the
    `dense_1` layer is wrapped in `prune.prune_low_magnitude`.

    Args:
        name_: name given to the returned Model.
        Inputs: Keras input tensor.
        nclasses: number of softmax output classes.
        filters, kernel, strides, pooling, dropout: equal-length per-block
            hyperparameter sequences (dropout entries are currently unused).
        activation: activation name applied after each BatchNorm.
        pruning_params: keyword arguments forwarded to
            `prune.prune_low_magnitude` for the dense layer.

    Returns:
        An uncompiled tf.keras Model.
    """
    n_blocks = len(filters)
    # All per-block hyperparameter lists must match `filters` in length.
    if any(len(seq) != n_blocks for seq in [filters, kernel, strides, pooling, dropout]):
        sys.exit("One value for stride and kernel must be added for each filter! Exiting")
    x = x_in = Inputs
    x = BatchNormalization()(x)
    x = ZeroPadding2D(padding=(1, 1), data_format="channels_last")(x)
    for idx, (f, k, s, p, d) in enumerate(zip(filters, kernel, strides, pooling, dropout)):
        print(("Adding layer with {} filters, kernel_size=({},{}), strides=({},{})").format(f, k, k, s, s))
        x = Conv2D(
            int(f),
            kernel_size=(int(k), int(k)),
            strides=(int(s), int(s)),
            kernel_initializer='lecun_uniform',
            kernel_regularizer=l1(0.0001),
            use_bias=False,
            name='conv_%i' % idx,
        )(x)
        # p == 0 disables pooling; otherwise p is the (square) pool size.
        if float(p) != 0:
            x = MaxPooling2D(pool_size=(int(p), int(p)))(x)
        x = BatchNormalization()(x)
        x = Activation(activation, name='conv_act_%i' % idx)(x)
    x = Flatten()(x)
    dense = Dense(128, kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001), use_bias=False, name='dense_1')
    x = prune.prune_low_magnitude(dense, **pruning_params)(x)
    x = Dropout(0.25)(x)
    x = BatchNormalization()(x)
    x = Activation(activation, name='dense_act')(x)
    x_out = Dense(nclasses, activation='softmax', name='output')(x)
    return Model(inputs=[x_in], outputs=[x_out], name=name_)
def float_cnn_allPrune(name_, Inputs,nclasses,filters,kernel,strides, pooling, dropout, activation, pruning_params = {}):
    """Build a float CNN with every Conv2D and the 128-unit Dense layer
    wrapped in `prune.prune_low_magnitude`.

    Args:
        name_: name given to the returned Model.
        Inputs: Keras input tensor.
        nclasses: number of softmax output classes.
        filters, kernel, strides, pooling, dropout: equal-length per-block
            hyperparameter sequences (dropout entries are currently unused).
        activation: activation name applied after each BatchNorm.
        pruning_params: keyword arguments forwarded to
            `prune.prune_low_magnitude` for each wrapped layer.

    Returns:
        An uncompiled tf.keras Model.
    """
    # Fix: the original message claimed "float_cnn", which mislabels this builder.
    print("Building model: float_cnn_allPrune")
    n_blocks = len(filters)
    # All per-block hyperparameter lists must match `filters` in length.
    if any(len(seq) != n_blocks for seq in [filters, kernel, strides, pooling, dropout]):
        sys.exit("One value for stride and kernel must be added for each filter! Exiting")
    x = x_in = Inputs
    x = BatchNormalization()(x)
    x = ZeroPadding2D(padding=(1, 1), data_format="channels_last")(x)
    for idx, (f, k, s, p, d) in enumerate(zip(filters, kernel, strides, pooling, dropout)):
        print(("Adding layer with {} filters, kernel_size=({},{}), strides=({},{})").format(f, k, k, s, s))
        conv = Conv2D(int(f), kernel_size=(int(k), int(k)), strides=(int(s), int(s)), use_bias=False, name='conv_%i' % idx)
        x = prune.prune_low_magnitude(conv, **pruning_params)(x)
        # p == 0 disables pooling; otherwise p is the (square) pool size.
        if float(p) != 0:
            x = MaxPooling2D(pool_size=(int(p), int(p)))(x)
        x = BatchNormalization()(x)
        x = Activation(activation, name='conv_act_%i' % idx)(x)
    x = Flatten()(x)
    dense = Dense(128, kernel_initializer='lecun_uniform', use_bias=False, name='dense_1')
    x = prune.prune_low_magnitude(dense, **pruning_params)(x)
    x = BatchNormalization()(x)
    x = Activation(activation, name='dense_act')(x)
    x_out = Dense(nclasses, activation='softmax', name='output')(x)
    return Model(inputs=[x_in], outputs=[x_out], name=name_)
def float_cnn_1L_Prune(name_, Inputs,nclasses,filters,kernel,strides, pooling, dropout, activation, pruning_params = {}):
    """Build a float CNN where ONLY the second conv block (index 1) is
    wrapped in `prune.prune_low_magnitude`; all other layers are dense
    (unpruned).

    Args:
        name_: name given to the returned Model.
        Inputs: Keras input tensor.
        nclasses: number of softmax output classes.
        filters, kernel, strides, pooling, dropout: equal-length per-block
            hyperparameter sequences (dropout entries are currently unused).
        activation: activation name applied after each BatchNorm.
        pruning_params: keyword arguments forwarded to
            `prune.prune_low_magnitude` for the single pruned conv layer.

    Returns:
        An uncompiled tf.keras Model.
    """
    # Fix: the original message claimed "float_cnn", which mislabels this builder.
    print("Building model: float_cnn_1L_Prune")
    n_blocks = len(filters)
    # All per-block hyperparameter lists must match `filters` in length.
    if any(len(seq) != n_blocks for seq in [filters, kernel, strides, pooling, dropout]):
        sys.exit("One value for stride and kernel must be added for each filter! Exiting")
    x = x_in = Inputs
    x = BatchNormalization()(x)
    x = ZeroPadding2D(padding=(1, 1), data_format="channels_last")(x)
    for idx, (f, k, s, p, d) in enumerate(zip(filters, kernel, strides, pooling, dropout)):
        print(("Adding layer with {} filters, kernel_size=({},{}), strides=({},{})").format(f, k, k, s, s))
        if idx == 1:
            # Only this block is pruned (note: no lecun_uniform init here,
            # matching the original behavior).
            conv = Conv2D(int(f), kernel_size=(int(k), int(k)), strides=(int(s), int(s)), use_bias=False, name='conv_%i' % idx)
            x = prune.prune_low_magnitude(conv, **pruning_params)(x)
        else:
            x = Conv2D(
                int(f),
                kernel_size=(int(k), int(k)),
                strides=(int(s), int(s)),
                kernel_initializer='lecun_uniform',
                use_bias=False,
                name='conv_%i' % idx,
            )(x)
        # p == 0 disables pooling; otherwise p is the (square) pool size.
        if float(p) != 0:
            x = MaxPooling2D(pool_size=(int(p), int(p)))(x)
        x = BatchNormalization()(x)
        x = Activation(activation, name='conv_act_%i' % idx)(x)
    x = Flatten()(x)
    x = Dense(128, kernel_initializer='lecun_uniform', use_bias=False, name='dense_1')(x)
    x = BatchNormalization()(x)
    x = Activation(activation, name='dense_act')(x)
    x_out = Dense(nclasses, activation='softmax', name='output')(x)
    return Model(inputs=[x_in], outputs=[x_out], name=name_)