# -*- encoding:utf-8 -*-
"""Load per-day feature rows from a quantization log file, run a trained
Keras model on each row, and print per-day class counts plus a weighted
down/up ratio."""
import datetime

import numpy as np
from keras.models import load_model


def read_data(path):
    """Read the log file and group rows by trading date (last element of each row's last field)."""
    day_lines = {}
    with open(path) as f:
        for line in f:
            line = eval(line.strip())  # each log line is a Python literal list
            date = str(line[-1][-1])
            day_lines.setdefault(date, []).append(line)
    # print(len(day_lines['20191230']))
    return day_lines


def predict(file_path='', model_path='15min_dnn_seq', rows=18, cols=18):
    day_lines = read_data(file_path)
    print('Finished reading data')
    model = load_model(model_path + '.h5')
    print('Model loaded')

    for key in sorted(day_lines.keys()):
        # print(key)
        lines = day_lines[key]
        up_num = 0
        down_num = 0
        size = len(lines[0])
        # x0..x3 count how many rows fall into each of the model's four output classes
        x0 = x1 = x2 = x3 = 0
        for line in lines:
            train_x = np.array([line[:size - 1]])
            # the first rows*cols values form the 2-D "image" input
            train_x_a = train_x[:, :rows * cols]
            train_x_a = train_x_a.reshape(train_x.shape[0], rows, cols, 1)
            # train_x_b = train_x[:, 18*18:18*18 + 2*18]
            # train_x_b = train_x_b.reshape(train_x.shape[0], 18, 2, 1)
            # the remaining values form the auxiliary (flat) input
            train_x_c = train_x[:, rows * cols:]

            result = model.predict([train_x_c, train_x_a])

            # weight the vote by the one-hot flags at the end of the auxiliary input
            ratio = 1
            if train_x_c[0][-1] == 1:
                ratio = 2
            elif train_x_c[0][-2] == 1:
                ratio = 1.6
            elif train_x_c[0][-3] == 1:
                ratio = 1.3

            # outputs 0/1 are treated as "up" signals, 2/3 as "down" signals
            if result[0][0] > 0.5:
                up_num = up_num + ratio
            elif result[0][1] > 0.5:
                up_num = up_num + 0.4 * ratio
            elif result[0][2] > 0.5:
                down_num = down_num + 0.4 * ratio
            else:
                down_num = down_num + ratio

            # tally the argmax class for this row
            maxx = max(result[0])
            if maxx == result[0][0]:
                x0 = x0 + 1
            if maxx == result[0][1]:
                x1 = x1 + 1
            if maxx == result[0][2]:
                x2 = x2 + 1
            if maxx == result[0][3]:
                x3 = x3 + 1

        # print(key, int(up_num), int(down_num), (down_num*1.2 + 2)/(up_num*1.2 + 2))
        print(key, x0, x1, x2, x3, (down_num * 1.5 + 2) / (up_num * 1.2 + 2))


if __name__ == '__main__':
    today = datetime.datetime.now().strftime('%Y%m%d')

    # predict(file_path='D:\\data\\quantization\\stock6_5_test.log', model_path='5d_dnn_seq.h5')
    # predict(file_path='D:\\data\\quantization\\stock9_18_20200220.log', model_path='18d_dnn_seq.h5')
    # predict(file_path='D:\\data\\quantization\\stock9_18_2.log', model_path='18d_dnn_seq.h5')
    # predict(file_path='D:\\data\\quantization\\stock16_18d_20200310.log', model_path='16_18d_mix_seq')
    # predict(file_path='D:\\data\\quantization\\stock196_18d_20200326.log', model_path='196_18d_mix_6D_ma5_s_seq')
    # predict(file_path='D:\\data\\quantization\\stock321_28d_5D_20200429.log', model_path='321_28d_mix_5D_ma5_s_seq_2', rows=28, cols=20)

    predict(file_path='D:\\data\\quantization\\stock327_28d_' + today + '.log',
            model_path='327_28d_mix_5D_ma5_s_seq', rows=28, cols=20)

    # predict(file_path='D:\\data\\quantization\\stock9_18_4.log', model_path='18d_dnn_seq.h5')
    # predict(file_path='D:\\data\\quantization\\stock324_28d_3D_20200414_A.log', model_path='324_28d_mix_5D_ma5_s_seq', rows=28, cols=18)
    # predict(file_path='D:\\data\\quantization\\stock324_28d_3D_20200414_A.log', model_path='603_30d_mix_5D_ma5_s_seq', rows=30, cols=19)