lstm_kmeans_train.py

# -*- encoding:utf-8 -*-
import random

import numpy as np
# RandomOverSampler rebalances the training classes by duplicating
# minority-class samples
from imblearn.over_sampling import RandomOverSampler

# Adam is used as the optimizer (there are many alternatives, e.g. SGD)
from keras.optimizers import Adam
# Layers used below: Conv2D convolutions, Activation non-linearities,
# MaxPooling2D pooling, Flatten (collapses multi-dimensional input to 1-D),
# Dense fully connected layers
from keras.layers import (Conv2D, Activation, MaxPooling2D, Flatten, Dense,
                          Dropout, Input, BatchNormalization, concatenate, LSTM)
from keras import regularizers
from keras.models import Sequential, Model, load_model
from keras.callbacks import EarlyStopping

early_stopping = EarlyStopping(monitor='accuracy', patience=5, verbose=2)

epochs = 330
size = 580000  # ~580k samples
file_path = 'D:\\data\\quantization\\stock160_18d_train.log'
model_path = '160_18d_lstm_5D_ma5_s_seq.h5'
data_dir = 'D:\\data\\quantization\\'
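
# Assumed record layout (inferred from the slicing in read_data/train below):
# each line of the *.log files is a Python literal list in which
#   - items [0 : 18*24] are 18 timesteps x 24 features for the LSTM branch,
#   - items [18*24 : -2] are flat per-sample features for the MLP branch,
#   - the second-to-last item is unused here (dropped by s[:-2]),
#   - the last item is the training label.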

def read_data(path):
    lines = []
    with open(path) as f:
        for line in f:
            # each line is a Python literal list; eval() parses it
            lines.append(eval(line.strip()))
    random.shuffle(lines)
    print('Finished reading data')

    # 70/30 train/test split
    d = int(0.7 * len(lines))
    train_x = [s[:-2] for s in lines[0:d]]
    train_y = [s[-1] for s in lines[0:d]]
    test_x = [s[:-2] for s in lines[d:]]
    test_y = [s[-1] for s in lines[d:]]
    print('Finished converting data')

    # oversample minority classes so the training set is balanced
    ros = RandomOverSampler(random_state=0)
    X_resampled, y_resampled = ros.fit_resample(np.array(train_x), np.array(train_y))
    print('Finished resampling data')
    return X_resampled, y_resampled, np.array(test_x), np.array(test_y)
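
# Note: eval() above executes arbitrary expressions from the log files. If the
# files are not fully trusted, ast.literal_eval is the safer drop-in parser:
#
#   import ast
#   lines.append(ast.literal_eval(line.strip()))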

def mul_train(name="10_18d"):
    # train one model per k-means cluster (0..7) and log its test accuracy
    for x in range(0, 8):
        score = train(data_dir + 'kmeans\\' + name + "_trai_" + str(x) + ".log", x)  # e.g. stock160_18d_trai_0
        with open(data_dir + name + '_lstm.log', 'a') as f:
            f.write(str(x) + ':' + str(score[1]) + '\n')

def train(file_path, idx):
    train_x, train_y, test_x, test_y = read_data(file_path)

    # first 18*24 columns: 18 timesteps x 24 features for the LSTM branch
    train_x_a = train_x[:, :18 * 24]
    train_x_a = train_x_a.reshape(train_x.shape[0], 18, 24)
    # train_x_b = train_x[:, 18*24:18*16+10*18]
    # train_x_b = train_x_b.reshape(train_x.shape[0], 18, 10, 1)
    # remaining columns: flat features for the MLP branch
    train_x_c = train_x[:, 18 * 24:]

    # create the MLP and LSTM branch models
    mlp = create_mlp(train_x_c.shape[1], regress=False)
    cnn_0 = create_lstm(train_x_a.shape[1], 18, 24)
    # cnn_1 = create_cnn(18, 10, 1, kernel_size=(3, 5), filters=32, regress=False, output=120)

    # the input to the final set of layers is the concatenated *output*
    # of both the MLP and the LSTM branch
    combinedInput = concatenate([mlp.output, cnn_0.output])

    # the shared head: three dense layers followed by the softmax classifier
    x = Dense(256, activation="relu", kernel_regularizer=regularizers.l1(0.003))(combinedInput)
    x = Dropout(0.2)(x)
    x = Dense(256, activation="relu")(x)
    x = Dense(512, activation="relu")(x)
    # final 5-class softmax output layer
    x = Dense(5, activation="softmax")(x)

    # the final model accepts the flat features on the MLP input and the
    # 18x24 sequences on the LSTM input, and outputs a 5-class distribution
    model = Model(inputs=[mlp.input, cnn_0.input], outputs=x)
    print("Starting training")
    # h = model.fit(train_x, train_y, batch_size=4096*2, epochs=500, shuffle=True)

    opt = Adam(lr=1e-3, decay=1e-3 / 200)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])

    # train the model
    print("[INFO] training model...")
    model.fit(
        [train_x_c, train_x_a], train_y,
        # validation_data=([testAttrX, testImagesX], testY),
        # epochs=int(3*train_x_a.shape[0]/1300),
        epochs=epochs,
        batch_size=4096, shuffle=True,
        callbacks=[early_stopping]
    )

    # split the test set the same way as the training set
    test_x_a = test_x[:, :18 * 24]
    test_x_a = test_x_a.reshape(test_x.shape[0], 18, 24)
    # test_x_b = test_x[:, 18*16:18*16+10*18]
    # test_x_b = test_x_b.reshape(test_x.shape[0], 18, 10, 1)
    test_x_c = test_x[:, 18 * 24:]

    # evaluate on the held-out test data
    print("[INFO] evaluating model...")
    score = model.evaluate([test_x_c, test_x_a], test_y)
    print(score)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    model.save(model_path.split('.')[0] + '_' + str(idx) + '.h5')
    return score

def create_mlp(dim, regress=False):
    # define our MLP network
    model = Sequential()
    model.add(Dense(64, input_dim=dim, activation="relu"))
    model.add(Dense(64, activation="relu"))
    # check to see if the regression node should be added
    if regress:
        model.add(Dense(1, activation="linear"))
    # return our model
    return model
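
# Note: create_mlp deliberately returns a headless model; its output is merged
# with the LSTM branch in train() before the shared softmax head, so no
# classification layer is added here.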

def create_cnn(width, height, depth, filters=32, kernel_size=(5, 6), regress=False, output=24):
    # initialize the input shape and channel dimension, assuming
    # TensorFlow/channels-last ordering
    inputShape = (width, height, 1)
    chanDim = -1

    # define the model input
    inputs = Input(shape=inputShape)
    x = inputs

    # CONV => RELU => BN => POOL
    x = Conv2D(filters, kernel_size, strides=(2, 2), padding="same",
               # data_format='channels_first'
               )(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=chanDim)(x)
    # x = MaxPooling2D(pool_size=(2, 2))(x)

    # if width > 2:
    #     x = Conv2D(32, (10, 6), padding="same")(x)
    #     x = Activation("relu")(x)
    #     x = BatchNormalization(axis=chanDim)(x)

    # flatten the volume, then FC => RELU => BN => DROPOUT
    x = Flatten()(x)
    x = Dense(output)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=chanDim)(x)
    x = Dropout(0.2)(x)

    # apply another FC layer, this one to match the number of nodes
    # coming out of the MLP
    x = Dense(output)(x)
    x = Activation("relu")(x)

    # check to see if the regression node should be added
    if regress:
        x = Dense(1, activation="linear")(x)

    # construct the CNN
    model = Model(inputs, x)
    # return the CNN
    return model
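
# Note: create_cnn is currently unused; the cnn_1 branch that consumed it in
# train() is commented out and kept for reference.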

def create_lstm(sample, timesteps, input_dim):
    inputShape = (timesteps, input_dim)
    # define the model input
    inputs = Input(shape=inputShape)
    x = inputs
    # a single LSTM layer that consumes the (timesteps, input_dim) sequence
    # and returns its final hidden state
    x = LSTM(units=64, dropout=0.2)(x)
    # x = LSTM(16*16, return_sequences=False)
    # x = Activation("relu")(x)
    x = Dense(64)(x)
    x = Dropout(0.2)(x)
    x = Activation("relu")(x)
    # construct the LSTM branch model
    model = Model(inputs, x)
    # return the LSTM branch
    return model
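
# Minimal inference sketch (not part of the original script): load one of the
# per-cluster models saved by train() and score new samples. The input split
# mirrors train(); `samples` (an array of raw feature rows without the label
# columns) and the function name are assumptions for illustration.
def predict_cluster(samples, idx=0):
    m = load_model(model_path.split('.')[0] + '_' + str(idx) + '.h5')
    x = np.array(samples)
    x_a = x[:, :18 * 24].reshape(x.shape[0], 18, 24)  # sequence features
    x_c = x[:, 18 * 24:]                              # flat features
    return m.predict([x_c, x_a])                      # softmax over 5 classes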

if __name__ == '__main__':
    mul_train('stock160_18d')