@@ -16,10 +16,13 @@ from keras import regularizers
 from keras.models import Model


+epochs = 130
+size = 380000
+
 def read_data(path):
     lines = []
     with open(path) as f:
-        for x in range(300000): #380000
+        for x in range(size): #380000
             lines.append(eval(f.readline().strip()))

     random.shuffle(lines)
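The loop above still parses each line with eval, which executes whatever the file contains. A minimal alternative sketch, assuming each line holds a plain Python literal, swaps in ast.literal_eval, which rejects anything that is not a literal:

    import ast

    def read_data(path):
        lines = []
        with open(path) as f:
            for _ in range(size):
                # literal_eval parses lists/tuples/numbers but never runs code
                lines.append(ast.literal_eval(f.readline().strip()))
        random.shuffle(lines)
        return lines  # the original's train/test split would follow here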
@@ -46,16 +49,16 @@ train_x,train_y,test_x,test_y=read_data("D:\\data\\quantization\\stock18_18d_tra

 train_x_a = train_x[:,:18*18]
 train_x_a = train_x_a.reshape(train_x.shape[0], 18, 18, 1)
-train_x_b = train_x[:, 18*18:18*18+2*18]
-train_x_b = train_x_b.reshape(train_x.shape[0], 18, 2, 1)
-train_x_c = train_x[:,18*18+2*18:]
+train_x_b = train_x[:, 18*18:18*18+8*18]
+train_x_b = train_x_b.reshape(train_x.shape[0], 18, 8, 1)
+train_x_c = train_x[:,18*18+8*18:]


 def create_mlp(dim, regress=False):
     # define our MLP network
     model = Sequential()
-    model.add(Dense(32, input_dim=dim, activation="relu"))
-    model.add(Dense(32, activation="relu"))
+    model.add(Dense(16, input_dim=dim, activation="relu"))
+    model.add(Dense(16, activation="relu"))

     # check to see if the regression node should be added
     if regress:
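The widened slices assume each flat row is laid out as an 18x18 block (324 columns), then an 18x8 block (144 more columns), with the remainder feeding the MLP branch. A quick sanity check (a sketch, assuming train_x is a 2-D numpy array) catches rows that are too narrow before reshape fails:

    # 18*18 + 8*18 = 468 columns are consumed by the two CNN branches
    assert train_x.shape[1] > 18*18 + 8*18, "rows too narrow for the 18x8 slice"
    print("MLP feature width:", train_x.shape[1] - (18*18 + 8*18))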
@@ -65,7 +68,7 @@ def create_mlp(dim, regress=False):
     return model


-def create_cnn(width, height, depth, filters=(4, 6), kernel_size=(5, 6), regress=False, output=24):
+def create_cnn(width, height, depth, filters=32, kernel_size=(5, 6), regress=False, output=24):
     # initialize the input shape and channel dimension, assuming
     # TensorFlow/channels-last ordering
     inputShape = (width, height, 1)
@@ -77,7 +80,7 @@ def create_cnn(width, height, depth, filters=(4, 6), kernel_size=(5, 6), regress
     x = inputs

     # CONV => RELU => BN => POOL
-    x = Conv2D(32, kernel_size, strides=2, padding="same")(x)
+    x = Conv2D(filters, kernel_size, strides=2, padding="same")(x)
     x = Activation("relu")(x)
     x = BatchNormalization(axis=chanDim)(x)
     # x = MaxPooling2D(pool_size=(2, 2))(x)
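With strides=2 and padding="same", Conv2D halves each spatial dimension (rounding up), so the 18x18 branch leaves this layer at 9x9xfilters. A standalone check, sketched against the keras imports already used in this file:

    from keras.layers import Input, Conv2D
    from keras.models import Model

    inp = Input(shape=(18, 18, 1))
    out = Conv2D(48, (5, 6), strides=2, padding="same")(inp)
    print(Model(inp, out).output_shape)  # expected: (None, 9, 9, 48)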
@@ -111,8 +114,8 @@ def create_cnn(width, height, depth, filters=(4, 6), kernel_size=(5, 6), regress

 # create the MLP and CNN models
 mlp = create_mlp(train_x_c.shape[1], regress=False)
-cnn_0 = create_cnn(18, 18, 1, kernel_size=(10, 6), regress=False, output=256)
-cnn_1 = create_cnn(18, 2, 1, kernel_size=(10,2), regress=False, output=36)
+cnn_0 = create_cnn(18, 18, 1, kernel_size=(5, 6), filters=48, regress=False, output=256)
+cnn_1 = create_cnn(18, 8, 1, kernel_size=(5, 6), filters=32, regress=False, output=128)

 # create the input to our final set of layers as the *output* of both
 # the MLP and CNN
@@ -120,10 +123,10 @@ combinedInput = concatenate([mlp.output, cnn_0.output, cnn_1.output])

 # our final FC layer head will have two dense layers, the final one
 # being our regression head
-x = Dense(512, activation="relu", kernel_regularizer=regularizers.l1(0.003))(combinedInput)
+x = Dense(888, activation="relu", kernel_regularizer=regularizers.l1(0.003))(combinedInput)
 x = Dropout(0.2)(x)
-x = Dense(512, activation="relu")(x)
-x = Dense(512, activation="relu")(x)
+x = Dense(888, activation="relu")(x)
+x = Dense(888, activation="relu")(x)
 # add another layer
 x = Dense(5, activation="softmax")(x)
@@ -148,14 +151,14 @@ model.fit(
     [train_x_c, train_x_a, train_x_b], train_y,
     # validation_data=([testAttrX, testImagesX], testY),
     # epochs=int(3*train_x_a.shape[0]/1300),
-    epochs=100,
-    batch_size=2048, shuffle=True)
+    epochs=epochs,
+    batch_size=1024, shuffle=True)

 test_x_a = test_x[:,:18*18]
 test_x_a = test_x_a.reshape(test_x.shape[0], 18, 18, 1)
-test_x_b = test_x[:, 18*18:18*18+2*18]
-test_x_b = test_x_b.reshape(test_x.shape[0], 18, 2, 1)
-test_x_c = test_x[:,18*18+2*18:]
+test_x_b = test_x[:, 18*18:18*18+8*18]
+test_x_b = test_x_b.reshape(test_x.shape[0], 18, 8, 1)
+test_x_c = test_x[:,18*18+8*18:]

 # make predictions on the testing data
 print("[INFO] predicting house prices...")
@@ -165,6 +168,6 @@ print(score)
 print('Test score:', score[0])
 print('Test accuracy:', score[1])

-path="18_18d_mix_seq.h5"
+path="19_18d_mix_seq.h5"
 model.save(path)
 model=None
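The renamed .h5 file can be reloaded for inference later; a minimal sketch, assuming the same Keras version that saved it:

    from keras.models import load_model

    model = load_model("19_18d_mix_seq.h5")
    # inputs must match the training order: [test_x_c, test_x_a, test_x_b]
    preds = model.predict([test_x_c, test_x_a, test_x_b])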