yufeng committed 4 years ago
commit cac0636310

+ 108 - 0
mix/lstm_predict.py

@@ -0,0 +1,108 @@
+# -*- encoding:utf-8 -*-
+import numpy as np
+from keras.models import load_model
+import joblib
+
+
+def read_data(path):
+    lines = []
+    with open(path) as f:
+        for line in f.readlines()[:]:
+            # each line is a Python literal list
+            line = eval(line.strip())
+            # keep only records whose stock code starts with '0' or '3'
+            if line[-2][0].startswith('0') or line[-2][0].startswith('3'):
+                lines.append(line)
+
+    size = len(lines[0])
+    train_x = [s[:size - 2] for s in lines]
+    train_y = [s[size - 1] for s in lines]
+    return np.array(train_x), np.array(train_y), lines
+
+
+def _score(fact, line):
+    with open('mix_predict_dmi_18d.txt', 'a') as f:
+        f.write(str([line[-2], line[-1]]) + "\n")
+
+    up_right = 0
+    up_error = 0
+
+    if fact[0] == 1:
+        up_right = up_right + 1.12
+    elif fact[1] == 1:
+        up_right = up_right + 1.06
+    elif fact[2] == 1:
+        up_right = up_right + 1
+        up_error = up_error + 0.5
+    elif fact[3] == 1:
+        up_right = up_right + 0.94
+        up_error = up_error + 1
+    else:
+        up_error = up_error + 1
+        up_right = up_right + 0.88
+    return up_right, up_error
+
+
+def predict(file_path='', model_path='15min_dnn_seq.h5', idx=-1):
+    test_x, test_y, lines = read_data(file_path)
+    print('Load data success')
+
+    # split the flat feature row into the three model inputs:
+    # an 18x16 sequence block, an 18x10 image-like block, and the rest
+    test_x_a = test_x[:, :18*16]
+    test_x_a = test_x_a.reshape(test_x.shape[0], 18, 16)
+    test_x_b = test_x[:, 18*16:18*16+10*18]
+    test_x_b = test_x_b.reshape(test_x.shape[0], 18, 10, 1)
+    test_x_c = test_x[:, 18*16+10*18:]
+
+    model = load_model(model_path)
+    score = model.evaluate([test_x_c, test_x_a, test_x_b], test_y)
+    print('MIX', score)
+
+    up_num = 0
+    up_error = 0
+    up_right = 0
+    down_num = 0
+    down_error = 0
+    down_right = 0
+    i = 0
+    result = model.predict([test_x_c, test_x_a, test_x_b])
+    win_dnn = []
+    for r in result:
+        fact = test_y[i]
+
+        if idx in [-2]:
+            if r[0] > 0.5 or r[1] > 0.5:
+                pass
+        else:
+            if r[0] > 0.6 or r[1] > 0.6:
+                tmp_right, tmp_error = _score(fact, lines[i])
+                up_right = tmp_right + up_right
+                up_error = tmp_error + up_error
+                up_num = up_num + 1
+            elif r[3] > 0.7 or r[4] > 0.7:
+                if fact[0] == 1:
+                    down_error = down_error + 1
+                    down_right = down_right + 1.12
+                elif fact[1] == 1:
+                    down_error = down_error + 1
+                    down_right = down_right + 1.06
+                elif fact[2] == 1:
+                    down_error = down_error + 0.5
+                    down_right = down_right + 1
+                elif fact[3] == 1:
+                    down_right = down_right + 0.94
+                else:
+                    down_right = down_right + 0.88
+                down_num = down_num + 1
+
+        i = i + 1
+    # avoid division by zero when no signal fired
+    if up_num == 0:
+        up_num = 1
+    if down_num == 0:
+        down_num = 1
+    print('MIX', up_right, up_num, up_right/up_num, up_error/up_num, down_right/down_num, down_error/down_num)
+    return win_dnn, up_right/up_num, down_right/down_num
+
+
+if __name__ == '__main__':
+    predict(file_path='D:\\data\\quantization\\stock19_18d_test.log', model_path='19_18d_lstm_seq.h5')
+    # predict(file_path='D:\\data\\quantization\\stock6_test.log', model_path='15m_dnn_seq.h5')
+    # multi_predict(model='15_18d')
+    # predict_today(20200229, model='11_18d')
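
Note on the input layout consumed above: each flat feature row is read as an 18x16 sequence block for the LSTM branch, an 18x10 image-like block for the CNN branch, and the remaining scalars for the MLP branch. A minimal sketch of that split, assuming this layout (the trailing width of 11 is illustrative, not from the repo):

import numpy as np

n_extra = 11                                    # illustrative scalar-feature count
x = np.random.rand(4, 18 * 16 + 10 * 18 + n_extra)

x_a = x[:, :18 * 16].reshape(-1, 18, 16)        # LSTM branch: (batch, timesteps, features)
x_b = x[:, 18 * 16:18 * 16 + 10 * 18].reshape(-1, 18, 10, 1)  # CNN branch: channels-last
x_c = x[:, 18 * 16 + 10 * 18:]                  # MLP branch: leftover scalars
print(x_a.shape, x_b.shape, x_c.shape)          # (4, 18, 16) (4, 18, 10, 1) (4, 11)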

+ 205 - 0
mix/lstm_train.py

@@ -0,0 +1,205 @@
+# -*- encoding:utf-8 -*-
+import keras
+import numpy as np
+from keras.models import Sequential
+# optimizer: Adam (many alternatives exist, e.g. SGD)
+from keras.optimizers import Adam
+import random
+from keras.models import load_model
+from imblearn.over_sampling import RandomOverSampler
+from keras.utils import np_utils
+# layers used to build the model: Conv2D, Activation (activation functions),
+# MaxPooling2D (pooling); Flatten collapses a multi-dimensional input to 1-D;
+# Dense is a fully connected layer
+from keras.layers import Conv2D, Activation, MaxPool2D, Flatten, Dense, Dropout, Input, MaxPooling2D, BatchNormalization, concatenate
+from keras.layers import LSTM
+from keras import regularizers
+from keras.models import Model
+
+from keras.callbacks import EarlyStopping
+
+early_stopping = EarlyStopping(monitor='accuracy', patience=5, verbose=2)
+
+epochs = 40
+size = 80000
+
+
+def read_data(path):
+    lines = []
+    with open(path) as f:
+        for x in range(size):  # 380000
+            lines.append(eval(f.readline().strip()))
+
+    random.shuffle(lines)
+    print('Data loaded')
+
+    d = int(0.7 * len(lines))
+
+    train_x = [s[:-2] for s in lines[0:d]]
+    train_y = [s[-1] for s in lines[0:d]]
+    test_x = [s[:-2] for s in lines[d:]]
+    test_y = [s[-1] for s in lines[d:]]
+
+    print('Data converted')
+
+    # balance the classes by random oversampling
+    # (fit_sample was renamed fit_resample in newer imblearn releases)
+    ros = RandomOverSampler(random_state=0)
+    X_resampled, y_resampled = ros.fit_sample(np.array(train_x), np.array(train_y))
+
+    print('Resampling done')
+
+    return X_resampled, y_resampled, np.array(test_x), np.array(test_y)
+
+
+train_x, train_y, test_x, test_y = read_data("D:\\data\\quantization\\stock19_18d_train_1.log")
+
+# split the flat feature row into the three branch inputs
+train_x_a = train_x[:, :18*16]
+train_x_a = train_x_a.reshape(train_x.shape[0], 18, 16)
+train_x_b = train_x[:, 18*16:18*16+10*18]
+train_x_b = train_x_b.reshape(train_x.shape[0], 18, 10, 1)
+train_x_c = train_x[:, 18*16+10*18:]
+
+
+def create_mlp(dim, regress=False):
+    # define our MLP network
+    model = Sequential()
+    model.add(Dense(16, input_dim=dim, activation="relu"))
+    model.add(Dense(16, activation="relu"))
+
+    # check to see if the regression node should be added
+    if regress:
+        model.add(Dense(1, activation="linear"))
+
+    # return our model
+    return model
+
+
+def create_cnn(width, height, depth, filters=32, kernel_size=(5, 6), regress=False, output=24):
+    # initialize the input shape and channel dimension, assuming
+    # TensorFlow/channels-last ordering
+    inputShape = (width, height, 1)
+    chanDim = -1
+
+    # define the model input
+    inputs = Input(shape=inputShape)
+
+    x = inputs
+
+    # CONV => RELU => BN => POOL
+    x = Conv2D(filters, kernel_size, strides=(2, 2), padding="same",
+               # data_format='channels_first'
+               )(x)
+    x = Activation("relu")(x)
+    x = BatchNormalization(axis=chanDim)(x)
+    # x = MaxPooling2D(pool_size=(2, 2))(x)
+    # if width > 2:
+    #     x = Conv2D(32, (10, 6), padding="same")(x)
+    #     x = Activation("relu")(x)
+    #     x = BatchNormalization(axis=chanDim)(x)
+
+    # flatten the volume, then FC => RELU => BN => DROPOUT
+    x = Flatten()(x)
+    x = Dense(output)(x)
+    x = Activation("relu")(x)
+    x = BatchNormalization(axis=chanDim)(x)
+    x = Dropout(0.2)(x)
+
+    # apply another FC layer, this one to match the number of nodes
+    # coming out of the MLP
+    x = Dense(output)(x)
+    x = Activation("relu")(x)
+
+    # check to see if the regression node should be added
+    if regress:
+        x = Dense(1, activation="linear")(x)
+
+    # construct the CNN
+    model = Model(inputs, x)
+
+    # return the CNN
+    return model
+
+
+def create_lstm(sample, timesteps, input_dim):
+    # note: `sample` is unused
+    inputShape = (timesteps, input_dim)
+
+    # define the model input
+    inputs = Input(shape=inputShape)
+
+    x = inputs
+
+    # input_shape is redundant under the functional API (the Input layer defines it)
+    x = LSTM(units=32, input_shape=(18, 16), dropout=0.2)(x)
+    # x = LSTM(16*16, return_sequences=False)
+    # x = Activation("relu")(x)
+    x = Dense(64)(x)
+    x = Dropout(0.2)(x)
+    x = Activation("relu")(x)
+
+    # construct the LSTM branch
+    model = Model(inputs, x)
+
+    # return the LSTM branch
+    return model
+
+
+# create the MLP, LSTM and CNN branches
+mlp = create_mlp(train_x_c.shape[1], regress=False)
+cnn_0 = create_lstm(train_x_a.shape[1], 18, 16)
+cnn_1 = create_cnn(18, 10, 1, kernel_size=(3, 5), filters=32, regress=False, output=120)
+
+# create the input to our final set of layers as the *output* of all
+# three branches
+combinedInput = concatenate([mlp.output, cnn_0.output, cnn_1.output])
+
+# final fully connected head, ending in a 5-class softmax
+x = Dense(888, activation="relu", kernel_regularizer=regularizers.l1(0.003))(combinedInput)
+x = Dropout(0.2)(x)
+x = Dense(888, activation="relu")(x)
+x = Dense(888, activation="relu")(x)
+# add one more layer
+x = Dense(5, activation="softmax")(x)
+
+# the final model takes the scalar features on the MLP input, the sequence
+# block on the LSTM input and the image-like block on the CNN input, and
+# outputs a 5-class distribution
+model = Model(inputs=[mlp.input, cnn_0.input, cnn_1.input], outputs=x)
+
+
+print("Starting training ")
+# h = model.fit(train_x, train_y, batch_size=4096*2, epochs=500, shuffle=True)
+
+# compile with categorical cross-entropy, tracking accuracy
+opt = Adam(lr=1e-3, decay=1e-3 / 200)
+model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'],
+              )
+
+# train the model
+print("[INFO] training model...")
+model.fit(
+    [train_x_c, train_x_a, train_x_b], train_y,
+    # validation_data=([testAttrX, testImagesX], testY),
+    # epochs=int(3*train_x_a.shape[0]/1300),
+    epochs=epochs,
+    batch_size=2048, shuffle=True,
+    callbacks=[early_stopping]
+)
+
+test_x_a = test_x[:, :18*16]
+test_x_a = test_x_a.reshape(test_x.shape[0], 18, 16)
+test_x_b = test_x[:, 18*16:18*16+10*18]
+test_x_b = test_x_b.reshape(test_x.shape[0], 18, 10, 1)
+test_x_c = test_x[:, 18*16+10*18:]
+
+# evaluate on the held-out split
+print("[INFO] evaluating model...")
+score = model.evaluate([test_x_c, test_x_a, test_x_b], test_y)
+
+print(score)
+print('Test score:', score[0])
+print('Test accuracy:', score[1])
+
+path = "19_18d_lstm_seq.h5"
+model.save(path)
+model = None
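
The script above wires three Keras branches into a single classifier. For reference, a self-contained sketch of the same multi-input pattern on the tf.keras API (layer sizes here are illustrative, not the ones trained above):

from tensorflow.keras import layers, Model

inp_mlp = layers.Input(shape=(11,))        # scalar features (width illustrative)
inp_lstm = layers.Input(shape=(18, 16))    # 18 timesteps x 16 features
inp_cnn = layers.Input(shape=(18, 10, 1))  # image-like block, channels-last

a = layers.Dense(16, activation="relu")(inp_mlp)
b = layers.LSTM(32, dropout=0.2)(inp_lstm)
c = layers.Conv2D(32, (3, 5), strides=2, padding="same", activation="relu")(inp_cnn)
c = layers.Flatten()(c)

x = layers.concatenate([a, b, c])          # merge the three branch outputs
x = layers.Dense(64, activation="relu")(x)
out = layers.Dense(5, activation="softmax")(x)

model = Model([inp_mlp, inp_lstm, inp_cnn], out)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])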

+ 5 - 5
mix/mix_predict.py

@@ -45,11 +45,11 @@ def predict(file_path='', model_path='15min_dnn_seq.h5', idx=-1):
     test_x,test_y,lines=read_data(file_path)
     print('Load data success')
 
-    test_x_a = test_x[:,:18*18]
-    test_x_a = test_x_a.reshape(test_x.shape[0], 18, 18, 1)
-    test_x_b = test_x[:, 18*18:18*18+8*18]
-    test_x_b = test_x_b.reshape(test_x.shape[0], 18, 8, 1)
-    test_x_c = test_x[:,18*18+8*18:]
+    test_x_a = test_x[:,:18*16]
+    test_x_a = test_x_a.reshape(test_x.shape[0], 18, 16, 1)
+    test_x_b = test_x[:, 18*16:18*16+10*18]
+    test_x_b = test_x_b.reshape(test_x.shape[0], 18, 10, 1)
+    test_x_c = test_x[:,18*16+10*18:]
 
     model=load_model(model_path)
     score = model.evaluate([test_x_c, test_x_a, test_x_b], test_y)
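
The slice constants changed here (18*18 and 8*18 become 18*16 and 10*18) must stay in lockstep with the layout the loaded model was trained on; if they drift, reshape() fails or, worse, features silently shift columns. A hedged sketch of keeping the layout in one place (split_features is hypothetical, not part of the repo):

import numpy as np

def split_features(x, n_seq=18 * 16, n_img=10 * 18):
    # fail fast instead of silently mis-slicing
    assert x.shape[1] > n_seq + n_img, "row narrower than the declared blocks"
    a = x[:, :n_seq].reshape(-1, 18, 16, 1)
    b = x[:, n_seq:n_seq + n_img].reshape(-1, 18, 10, 1)
    c = x[:, n_seq + n_img:]
    return a, b, c

a, b, c = split_features(np.zeros((2, 18 * 16 + 10 * 18 + 7)))
print(a.shape, b.shape, c.shape)  # (2, 18, 16, 1) (2, 18, 10, 1) (2, 7)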

+ 30 - 22
mix/mix_train.py

@@ -15,9 +15,13 @@ from keras.layers import Conv2D, Activation, MaxPool2D, Flatten, Dense,Dropout,I
 from keras import regularizers
 from keras.models import Model
 
+from keras.callbacks import EarlyStopping
+
+early_stopping = EarlyStopping(monitor='accuracy', patience=5, verbose=2)
+
+epochs= 50
+size = 80000
 
-epochs= 130
-size = 380000
 
 def read_data(path):
     lines = []
@@ -45,13 +49,13 @@ def read_data(path):
     return X_resampled,y_resampled,np.array(test_x),np.array(test_y)
 
 
-train_x,train_y,test_x,test_y=read_data("D:\\data\\quantization\\stock18_18d_train_1.log")
+train_x,train_y,test_x,test_y=read_data("D:\\data\\quantization\\stock19_18d_train_1.log")
 
-train_x_a = train_x[:,:18*18]
-train_x_a = train_x_a.reshape(train_x.shape[0], 18, 18, 1)
-train_x_b = train_x[:, 18*18:18*18+8*18]
-train_x_b = train_x_b.reshape(train_x.shape[0], 18, 8, 1)
-train_x_c = train_x[:,18*18+8*18:]
+train_x_a = train_x[:,:18*16]
+train_x_a = train_x_a.reshape(train_x.shape[0], 18, 16, 1)
+train_x_b = train_x[:, 18*16:18*16+10*18]
+train_x_b = train_x_b.reshape(train_x.shape[0], 18, 10, 1)
+train_x_c = train_x[:,18*16+10*18:]
 
 
 def create_mlp(dim, regress=False):
@@ -80,7 +84,9 @@ def create_cnn(width, height, depth, filters=32, kernel_size=(5, 6), regress=Fal
     x = inputs
 
     # CONV => RELU => BN => POOL
-    x = Conv2D(filters, kernel_size, strides=2, padding="same")(x)
+    x = Conv2D(filters, kernel_size, strides=2, padding="same",
+               # data_format='channels_first'
+               )(x)
     x = Activation("relu")(x)
     x = BatchNormalization(axis=chanDim)(x)
     # x = MaxPooling2D(pool_size=(2, 2))(x)
@@ -114,8 +120,8 @@ def create_cnn(width, height, depth, filters=32, kernel_size=(5, 6), regress=Fal
 
 # create the MLP and CNN models
 mlp = create_mlp(train_x_c.shape[1], regress=False)
-cnn_0 = create_cnn(18, 18, 1, kernel_size=(5, 6), filters=48, regress=False, output=256)
-cnn_1 = create_cnn(18, 8, 1, kernel_size=(5, 6), filters=32, regress=False, output=128)
+cnn_0 = create_cnn(18, 16, 1, kernel_size=(3, 5), filters=32, regress=False, output=150)
+cnn_1 = create_cnn(18, 10, 1, kernel_size=(3, 4), filters=32, regress=False, output=120)
 
 # create the input to our final set of layers as the *output* of both
 # the MLP and CNN
@@ -123,10 +129,9 @@ combinedInput = concatenate([mlp.output, cnn_0.output, cnn_1.output])
 
 # our final FC layer head will have two dense layers, the final one
 # being our regression head
-x = Dense(888, activation="relu", kernel_regularizer=regularizers.l1(0.003))(combinedInput)
+x = Dense(1024, activation="relu", kernel_regularizer=regularizers.l1(0.003))(combinedInput)
 x = Dropout(0.2)(x)
-x = Dense(888, activation="relu")(x)
-x = Dense(888, activation="relu")(x)
+x = Dense(1024, activation="relu")(x)
 # add one more layer
 x = Dense(5, activation="softmax")(x)
 
@@ -143,7 +148,8 @@ print("Starting training ")
 # implying that we seek to minimize the absolute percentage difference
 # between our price *predictions* and the *actual prices*
 opt = Adam(lr=1e-3, decay=1e-3 / 200)
-model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])
+model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'],
+              )
 
 # train the model
 print("[INFO] training model...")
@@ -152,13 +158,15 @@ model.fit(
     # validation_data=([testAttrX, testImagesX], testY),
     # epochs=int(3*train_x_a.shape[0]/1300),
     epochs=epochs,
-    batch_size=1024, shuffle=True)
-
-test_x_a = test_x[:,:18*18]
-test_x_a = test_x_a.reshape(test_x.shape[0], 18, 18, 1)
-test_x_b = test_x[:, 18*18:18*18+8*18]
-test_x_b = test_x_b.reshape(test_x.shape[0], 18, 8, 1)
-test_x_c = test_x[:,18*18+8*18:]
+    batch_size=2048, shuffle=True,
+    callbacks=[early_stopping]
+)
+
+test_x_a = test_x[:,:18*16]
+test_x_a = test_x_a.reshape(test_x.shape[0], 18, 16, 1)
+test_x_b = test_x[:, 18*16:18*16+10*18]
+test_x_b = test_x_b.reshape(test_x.shape[0], 18, 10, 1)
+test_x_c = test_x[:,18*16+10*18:]
 
 # make predictions on the testing data
 print("[INFO] predicting house prices...")

+ 6 - 6
stock/dnn_predict_by_day.py

@@ -35,13 +35,13 @@ def predict(file_path='', model_path='15min_dnn_seq'):
 
         up_num = 0
         down_num = 0
-        x = 24 # fields per record
+        x = 16 # fields per record
         k = 18 # period
         for line in lines:
-            v = line[1:x*k + 1]
+            v = line[0:x*k]
             v = np.array(v)
             v = v.reshape(k, x)
-            v = v[:,4:8]
+            v = v[:,6:10]
             v = v.reshape(1, 4*k)
             # print(v)
             r = estimator.predict(v)
@@ -51,7 +51,7 @@ def predict(file_path='', model_path='15min_dnn_seq'):
 
             if result[0][3] > 0.5 or result[0][4] > 0.5:
                 down_num = down_num + 1
-            elif result[0][1] > 0.5 or result[0][2] > 0.5:
+            elif result[0][1] > 0.5 or result[0][0] > 0.5:
                 up_num = up_num + 0.5  # optimistic: raise this; pessimistic: lower it
 
             # if result[0][0] > 0.5 or result[0][1] > 0.5:
@@ -76,6 +76,6 @@ if __name__ == '__main__':
     # predict(file_path='D:\\data\\quantization\\stock6_5_test.log', model_path='5d_dnn_seq.h5')
     # predict(file_path='D:\\data\\quantization\\stock9_18_20200220.log', model_path='18d_dnn_seq.h5')
     # predict(file_path='D:\\data\\quantization\\stock9_18_2.log', model_path='18d_dnn_seq.h5')
-    predict(file_path='D:\\data\\quantization\\stock11_18d_20200303.log', model_path='11_18d_dnn_seq')
-    # predict(file_path='D:\\data\\quantization\\stock12_18d_20200226.log', model_path='11_18d_dnn_seq')
+    predict(file_path='D:\\data\\quantization\\stock19_18d_20200309.log', model_path='19_18d_dnn_seq')
+    # predict(file_path='D:\\data\\quantization\\stock11_18d_20190103_20190604.log', model_path='14_18d_dnn_seq')
     # predict(file_path='D:\\data\\quantization\\stock9_18_4.log', model_path='18d_dnn_seq.h5')
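
With x = 16 and the new slices, each record is read as 18 days of 16 fields from the start of the line, then columns 6..9 of every day are kept and flattened into a 1x72 vector for the estimator. A quick shape check of that pipeline on a synthetic record:

import numpy as np

x, k = 16, 18                           # fields per day, days per record
line = list(range(x * k + 4))           # synthetic record with trailing fields
v = np.array(line[0:x * k]).reshape(k, x)[:, 6:10].reshape(1, 4 * k)
print(v.shape)                          # (1, 72)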

+ 1 - 1
stock/dnn_predict_by_stock.py

@@ -105,7 +105,7 @@ def predict(file_path='', model_path='15min_dnn_seq'):
                         if today_price['close'] > high_price:
                             high_price = today_price['close']
 
-        else:
+            else:
                 if buy == 1:
                     init_money = (init_money * (today_price['close'] - last_price)/last_price) + init_money
                     if init_money < 8500:

+ 8 - 5
stock/dnn_predict_dmi.py

@@ -30,7 +30,9 @@ def _score(fact, line):
         up_right = up_right + 1.06
     elif fact[2] == 1:
         up_right = up_right + 1
+        up_error = up_error + 0.5
     elif fact[3] == 1:
+        up_error = up_error + 1
         up_right = up_right + 0.94
     else:
         up_error = up_error + 1
@@ -83,6 +85,7 @@ def predict(file_path='', model_path='15min_dnn_seq.h5', idx=-1):
                     down_error = down_error + 1
                     down_right = down_right + 1.12
                 elif fact[1] == 1:
+                    down_error = down_error + 1
                     down_right = down_right + 1.06
                 elif fact[2] == 1:
                     down_right = down_right + 1
@@ -101,7 +104,7 @@ def predict(file_path='', model_path='15min_dnn_seq.h5', idx=-1):
     return win_dnn,up_right/up_num,down_right/down_num
 
 
-def multi_predict():
+def multi_predict(model='14_18d'):
     r = 0;
     p = 0
     for x in range(0, 12): # 0,2,3,4,6,8,9,10,11
@@ -110,8 +113,8 @@ def multi_predict():
     # for x in [2,4,7,10]: # 2 performs best; 0 and 8 are valid contrarian indicators (9 is a false contrarian indicator: sample too small)
         print(x)
     # for x in [0,2,5,6,7]: # 5 performs best
-        win_dnn, up_ratio,down_ratio = predict(file_path='D:\\data\\quantization\\kmeans\\stock13_18d_test_' + str(x) + '.log',
-                                               model_path='13_18d_dnn_seq_' + str(x) + '.h5', idx=x)
+        win_dnn, up_ratio,down_ratio = predict(file_path='D:\\data\\quantization\\kmeans\\stock' + model + '_test_' + str(x) + '.log',
+                                               model_path=model + '_dnn_seq_' + str(x) + '.h5', idx=x)
         r = r + up_ratio
         p = p + down_ratio
     print(r, p)
@@ -198,7 +201,7 @@ def predict_today(day, model='10_18d'):
 
 
 if __name__ == '__main__':
-    # predict(file_path='D:\\data\\quantization\\stock6_5_test.log', model_path='5d_dnn_seq.h5')
+    # predict(file_path='D:\\data\\quantization\\stock16_18d_test.log', model_path='16_18d_cnn_seq.h5')
     # predict(file_path='D:\\data\\quantization\\stock6_test.log', model_path='15m_dnn_seq.h5')
-    multi_predict()
+    multi_predict(model='19_18d')
     # predict_today(20200229, model='11_18d')
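
Parameterising multi_predict means per-cluster test files and model weights are resolved from the model prefix. Reproduced from the code above, the paths a call like multi_predict(model='19_18d') will look up:

model = '19_18d'
for x in range(0, 12):
    file_path = 'D:\\data\\quantization\\kmeans\\stock' + model + '_test_' + str(x) + '.log'
    model_path = model + '_dnn_seq_' + str(x) + '.h5'
    print(file_path, model_path)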