Browse Source

大盘超好的时候可以用这种

yufeng 4 years ago
parent
commit
6c9a7c2f24
4 changed files with 130 additions and 20 deletions
  1. 6 5
      week/predict_everyweek_100.py
  2. 100 0
      week/predict_everyweek_113.py
  3. 5 5
      week/week_predict_100.py
  4. 19 10
      week/week_train_100.py

+ 6 - 5
week/predict_everyweek_100.py

@@ -22,7 +22,7 @@ O_list = []  # 其他
22 22
 
23 23
 
24 24
 def predict_today(file, day, model='10_18d', log=True):
25
-    industry_list = get_hot_industry(day)
25
+    # industry_list = get_hot_industry(day)
26 26
 
27 27
     lines = []
28 28
     with open(file) as f:
@@ -70,11 +70,12 @@ def predict_today(file, day, model='10_18d', log=True):
70 70
                             concept_detail_list.append(c['name'])
71 71
 
72 72
             if log is True:
73
-                with open('D:\\data\\quantization\\predict\\' + str(day) + '_week_100.txt', mode='a', encoding="utf-8") as f:
74
-                    f.write(str(line[-1]) + ' ' + stock['name'] + ' ' + stock['sw_industry'] + ' ' + str(concept_detail_list) + ' ' + str(result[0][0]) + '\n')
73
+                with open('D:\\data\\quantization\\predict\\' + str(day) + '_week_103A.txt', mode='a', encoding="utf-8") as f:
74
+                    f.write(str(line[-1]) + ' ' + stock['name'] + ' ' + stock['sw_industry'] + ' A ' + str(concept_detail_list) + ' ' + str(result[0][0]) + '\n')
75 75
 
76 76
         elif result[0][1] > 0.5:
77
-            pass
77
+            with open('D:\\data\\quantization\\predict\\' + str(day) + '_week_103A.txt', mode='a', encoding="utf-8") as f:
78
+                f.write(str(line[-1]) + ' ' + stock['name'] + ' ' + stock['sw_industry'] + ' B ' + str(result[0][1]) + '\n')
78 79
         elif result[0][2] > 0.5:
79 80
             if stock['ts_code'] in holder_stock_list:
80 81
                 print(stock['ts_code'], stock['name'], '警告危险')
@@ -97,4 +98,4 @@ def predict_today(file, day, model='10_18d', log=True):
97 98
 if __name__ == '__main__':
98 99
     # 策略B
99 100
     # predict_today("D:\\data\\quantization\\stock505_28d_20200416.log", 20200416, model='505_28d_mix_5D_ma5_s_seq.h5', log=True)
100
-    predict_today("D:\\data\\quantization\\week101_18d_20200421.log", 20200421, model='103_18d_mix_3W_s_seq.h5', log=True)
101
+    predict_today("D:\\data\\quantization\\week101_18d_20191213.log", 20191213, model='103_18d_mix_3W_s_seqA.h5', log=True)

+ 100 - 0
week/predict_everyweek_113.py

@@ -0,0 +1,100 @@
1
# -*- encoding:utf-8 -*-
import numpy as np
from keras.models import load_model
import random
from mix.stock_source import *
import pymongo
from util.mongodb import get_mongo_table_instance

# MongoDB collection handles shared by the whole prediction run.
# NOTE(review): semantics inferred from collection names only — confirm against the schema.
code_table = get_mongo_table_instance('tushare_code')                    # stock master records (ts_code, name, sw_industry, ...)
k_table = get_mongo_table_instance('stock_day_k')                        # presumably daily K-line bars; unused in this module's visible code
stock_concept_table = get_mongo_table_instance('tushare_concept_detail') # per-stock concept membership rows
# Full concept catalogue materialized once so the per-stock lookup below avoids repeated queries.
all_concept_code_list = list(get_mongo_table_instance('tushare_concept').find({}))


gainian_map = {}  # concept tally (currently only referenced in commented-out debug prints)
hangye_map = {}   # industry tally (currently only referenced in commented-out debug prints)


# Result buckets accumulated by predict_today() and printed at the end of a run.
Z_list = []  # watchlist picks (never appended to in this module's visible code)
R_list = []  # ROE / growth list picks
O_list = []  # others (never appended to in this module's visible code)
24
def predict_today(file, day, model='10_18d', log=True):
    """Score every sample in *file* with the given Keras model and report picks.

    Parameters
    ----------
    file : str
        Path to a log file; each line is a Python-literal list whose last
        element identifies the stock (``line[-1][0]`` is the ts_code).
    day : int
        Trade date used to name the output prediction file.
    model : str
        Path/name of the Keras ``.h5`` model to load.
    log : bool
        When exactly ``True``, append strong picks to the ``_week_119.txt``
        prediction file.

    Side effects: prints picks/warnings, appends to R_list, writes the
    prediction file, and shuffles/prints the module-level O_list and R_list.
    """
    # industry_list = get_hot_industry(day)

    # NOTE: input lines are trusted project-generated data; eval() here would
    # be unsafe on untrusted input.
    with open(file) as fh:
        samples = [eval(raw.strip()) for raw in fh.readlines()[:]]

    feature_len = len(samples[0])

    net = load_model(model)

    row, col = 18, 9  # shape of the first CNN input branch (18 x 9)

    for sample in samples:
        features = np.array([sample[:feature_len - 1]])

        # Split the flat feature vector into the three model branches:
        # an 18x9 image, an 11x14 image, and the remaining dense features.
        branch_a = features[:, :row * col].reshape(features.shape[0], row, col, 1)
        branch_b = features[:, row * col:row * col + 11 * 14].reshape(features.shape[0], 11, 14, 1)
        branch_c = features[:, row * col + 11 * 14:]

        result = net.predict([branch_c, branch_a, branch_b])
        # print(result, sample[-1])
        stock = code_table.find_one({'ts_code': sample[-1][0]})

        if result[0][0] > 0.85:
            # Skip STAR-market (688xxx) codes.
            if sample[-1][0].startswith('688'):
                continue
            # Drop ST / new-listing / delisting-risk names.
            if stock['name'].startswith('ST') or stock['name'].startswith('N') or stock['name'].startswith('*'):
                continue

            if stock['ts_code'] in ROE_stock_list or stock['ts_code'] in zeng_stock_list:
                R_list.append([stock['ts_code'], stock['name']])

            print(stock['ts_code'], stock['name'], 'zhang10')

            # Resolve the stock's concept codes to human-readable names.
            concept_rows = list(stock_concept_table.find({'ts_code': stock['ts_code']}))
            concept_detail_list = [
                c['name']
                for concept in concept_rows
                for c in all_concept_code_list
                if c['code'] == concept['concept_code']
            ]

            if log is True:
                with open('D:\\data\\quantization\\predict\\' + str(day) + '_week_119.txt', mode='a', encoding="utf-8") as f:
                    f.write(str(sample[-1]) + ' ' + stock['name'] + ' ' + stock['sw_industry'] + ' ' + str(concept_detail_list) + ' ' + str(result[0][0]) + '\n')

        elif result[0][1] > 0.7:
            print(stock['ts_code'], stock['name'], 'zhang5')
        elif result[0][2] > 0.5:
            pass
        elif result[0][3] > 0.5:
            pass
        else:
            pass

    # print(gainian_map)
    # print(hangye_map)
    random.shuffle(O_list)
    print(O_list[:3])

    random.shuffle(R_list)
    print('----ROE----')
    print(R_list[:3])
95
+
96
+
97
if __name__ == '__main__':
    # Strategy B
    # predict_today("D:\\data\\quantization\\stock505_28d_20200416.log", 20200416, model='505_28d_mix_5D_ma5_s_seq.h5', log=True)
    # NOTE(review): the log filename date (20200403) differs from the `day`
    # argument (20200410) that names the output file — confirm this is intended.
    predict_today("D:\\data\\quantization\\week119_18d_20200403.log", 20200410, model='119_18d_mix_3W_s_seqA.h5', log=True)

+ 5 - 5
week/week_predict_100.py

@@ -40,9 +40,9 @@ def predict(file_path='', model_path='15min_dnn_seq.h5', idx=-1, row=18, col=20)
40 40
 
41 41
     test_x_a = test_x[:,:row*col]
42 42
     test_x_a = test_x_a.reshape(test_x.shape[0], row, col, 1)
43
-    test_x_b = test_x[:, row*col:row*col + 11*16]
44
-    test_x_b = test_x_b.reshape(test_x.shape[0],11, 16, 1)
45
-    test_x_c = test_x[:,row*col + 11*16:]
43
+    test_x_b = test_x[:, row*col:row*col + 11*13]
44
+    test_x_b = test_x_b.reshape(test_x.shape[0],11, 13, 1)
45
+    test_x_c = test_x[:,row*col + 11*13:]
46 46
 
47 47
     model=load_model(model_path)
48 48
     score = model.evaluate([test_x_c, test_x_a, test_x_b], test_y)
@@ -145,5 +145,5 @@ def random_predict(file_path=''):
145 145
 
146 146
 
147 147
 if __name__ == '__main__':
148
-    predict(file_path='D:\\data\\quantization\\week113_18d_test.log', model_path='113_18d_mix_3W_s_seq.h5', row=18, col=6)
149
-    # random_predict(file_path='D:\\data\\quantization\\week109/_18d_test.log')
148
+    predict(file_path='D:\\data\\quantization\\week120_18d_test.log', model_path='120_18d_mix_3W_s_seqA.h5', row=18, col=9)
149
+    # random_predict(file_path='D:\\data\\quantization\\week118_18d_test.log')

+ 19 - 10
week/week_train_100.py

@@ -16,18 +16,18 @@ from keras.callbacks import EarlyStopping
16 16
 
17 17
 early_stopping = EarlyStopping(monitor='accuracy', patience=5, verbose=2)
18 18
 
19
-epochs= 38
19
+epochs= 77
20 20
 # size = 24000 #共68W
21
-file_path = 'D:\\data\\quantization\\week112_18d_train1.log'
22
-model_path = '112_18d_mix_3W_s_seq.h5'
21
+file_path = 'D:\\data\\quantization\\week120_18d_train1.log'
22
+model_path = '120_18d_mix_3W_s_seqA.h5'
23 23
 row = 18
24
-col = 11
25
-col1 = 17
24
+col = 9
25
+col1 = 13
26 26
 '''
27 27
 0   18-3                    18*11           25,102,47-29
28 28
 1   18W预测3周后最高价+pe   18*11           37,101,44-22
29 29
 2                           18*11 + 11*16   33,101,41-30
30
-3   stripe=1,win=4-3        18*11 + 11*16   38,104,32-20  ----- 随机25,100,51-26            
30
+3   stripe=1,win=4-3        18*11 + 11*16   31,108,19-34  ----- 随机25,100,51-26            
31 31
 4   stripe=1,win=3          18*11 + 11*16   34,103,41-26
32 32
 5   stripe=1,win=3          18*11           
33 33
 6   用ma来衡量    
@@ -37,8 +37,17 @@ col1 = 17
37 37
 10  ma5-大盘相关+alpha_53+alpha_18           48,97,61-06
38 38
 
39 39
 11  high-大盘相关+alpha_53+alpha_18                             35,103,39-37
40
-12  high-大盘相关+alpha_53+alpha_18(每日)    18*11 + 11*17      
40
+12  high-大盘相关+alpha_53+alpha_18(每日)    18*11 + 11*17      33,101,46-30
41 41
 13  high-大盘相关+alpha_53+alpha_18-dmi      18*6 + 11*16       37,105,33-32
42
+
43
+14  high-大盘相关+alpha_53+alpha_18-dmi+result修改(自己对比)    18*6 + 11*16    17,97,59
44
+15  high-大盘相关+alpha_53+alpha_18+result修改-波动-ma+dmi      18*9 + 11*14    26,99,53-22   !!!
45
+16  high-pettm                                                  18*9 + 11*13    29,99,54-26
46
+17  high+大盘相关                                               18*9 + 11*13    26,98,54-27
47
+
48
+18  high-大盘相关+alpha_53+alpha_18+result修改-波动-ma+dmi 8周后     18*9 + 11*14     19,111,8,55   16    ----- 随机24,100,49      
49
+19  high-大盘相关+alpha_53+alpha_18+result修改-波动-ma+dmi 4周后     18*9 + 11*14     26,113,2,22   73条数据 
50
+20  high-大盘相关+alpha_53+alpha_18+result修改-波动-ma+双dmi 4周后   18*9 + 11*13     32,110,11,26  大盘超好的时候可以用这种
42 51
 '''
43 52
 
44 53
 def read_data(path):
@@ -101,7 +110,7 @@ def create_mlp(dim, regress=False):
101 110
     return model
102 111
 
103 112
 
104
-def create_cnn(width, height, depth, size=48, kernel_size=(5, 6), regress=False, output=24):
113
+def create_cnn(width, height, depth, size=48, kernel_size=(5, 6), regress=False, output=24, strides=2):
105 114
     # initialize the input shape and channel dimension, assuming
106 115
     # TensorFlow/channels-last ordering
107 116
     inputShape = (width, height, 1)
@@ -111,7 +120,7 @@ def create_cnn(width, height, depth, size=48, kernel_size=(5, 6), regress=False,
111 120
     inputs = Input(shape=inputShape)
112 121
     # x = inputs
113 122
     # CONV => RELU => BN => POOL
114
-    x = Conv2D(size, kernel_size, strides=2, padding="same")(inputs)
123
+    x = Conv2D(size, kernel_size, strides=strides, padding="same")(inputs)
115 124
     x = Activation("relu")(x)
116 125
     x = BatchNormalization(axis=chanDim)(x)
117 126
 
@@ -148,7 +157,7 @@ mlp = create_mlp(train_x_c.shape[1], regress=False)
148 157
 cnn_0 = create_cnn(row, col, 1, kernel_size=(4, col), size=66, regress=False, output=66)         # 29 98 47
149 158
 # cnn_0 = create_cnn(18, 20, 1, kernel_size=(9, 9), size=90, regress=False, output=96)         # 28 97 53
150 159
 # cnn_0 = create_cnn(18, 20, 1, kernel_size=(3, 20), size=90, regress=False, output=96)
151
-cnn_1 = create_cnn(11, col1, 1, kernel_size=(3, col1), size=66, regress=False, output=66)
160
+cnn_1 = create_cnn(11, col1, 1, kernel_size=(3, col1), size=66, regress=False, output=66, strides=1)
152 161
 # cnn_1 = create_cnn(9, 26, 1, kernel_size=(2, 14), size=36, regress=False, output=64)
153 162
 
154 163
 # create the input to our final set of layers as the *output* of both