# dnn_train.py — DNN training for stock quantization data.
# (Web-paste artifacts — file-size header and concatenated line numbers — removed.)
# -*- encoding:utf-8 -*-
# Standard library
import ast
import random

# Third-party
import joblib
import keras
import numpy as np
import tensorflow
from imblearn.over_sampling import RandomOverSampler
from keras import regularizers
from keras.layers import Dense, Dropout
from keras.models import Sequential, load_model
  12. def read_data(path):
  13. lines = []
  14. with open(path) as f:
  15. # for x in range(30000):
  16. # lines.append(eval(f.readline().strip()))
  17. for line in f.readlines()[:]:
  18. lines.append(eval(line.strip()))
  19. random.shuffle(lines)
  20. print('读取数据完毕')
  21. d=int(0.95*len(lines))
  22. size = len(lines[0])
  23. train_x=[s[:size - 2] for s in lines[0:d]]
  24. train_y=[s[size-1] for s in lines[0:d]]
  25. test_x=[s[:size - 2] for s in lines[d:]]
  26. test_y=[s[size-1] for s in lines[d:]]
  27. print('转换数据完毕')
  28. ros = RandomOverSampler(random_state=0)
  29. X_resampled, y_resampled = ros.fit_sample(np.array(train_x), np.array(train_y))
  30. print('数据重采样完毕')
  31. return X_resampled,y_resampled,np.array(test_x),np.array(test_y)
  32. def resample(path):
  33. lines = []
  34. with open(path) as f:
  35. for x in range(330000):
  36. lines.append(eval(f.readline().strip()))
  37. estimator = joblib.load('km.pkl')
  38. x = 17
  39. for line in lines:
  40. v = line[1:x*10 + 1]
  41. v = np.array(v)
  42. v = v.reshape(10, x)
  43. v = v[:,0:4]
  44. v = v.reshape(1, 40)
  45. # print(v)
  46. r = estimator.predict(v)
  47. with open('D:\\data\\quantization\\kmeans\\stock8_14_train_' + str(r[0]) + '.log', 'a') as f:
  48. f.write(str(line) + '\n')
  49. def mul_train():
  50. # for x in range(0, 16):
  51. for x in [0]:
  52. train(input_dim=176, result_class=5, file_path="D:\\data\\quantization\\kmeans\\stock2_10_" + str(x) + ".log",
  53. model_name='5d_dnn_seq_' + str(x) + '.h5')
  54. def train(input_dim=400, result_class=3, file_path="D:\\data\\quantization\\stock6.log", model_name=''):
  55. train_x,train_y,test_x,test_y=read_data(file_path)
  56. model = Sequential()
  57. model.add(Dense(units=120+input_dim, input_dim=input_dim, activation='relu'))
  58. model.add(Dense(units=120+input_dim, activation='relu',kernel_regularizer=regularizers.l1(0.001)))
  59. model.add(Dense(units=120+input_dim, activation='relu'))
  60. model.add(Dropout(0.1))
  61. model.add(Dense(units=120 + input_dim, activation='relu'))
  62. model.add(Dropout(0.2))
  63. # model.add(Dense(units=120+input_dim, activation='selu'))
  64. # model.add(Dropout(0.1))
  65. # model.add(Dense(units=60+input_dim, activation='selu'))
  66. # model.add(Dropout(0.2))
  67. model.add(Dense(units=512, activation='relu'))
  68. model.add(Dense(units=result_class, activation='softmax'))
  69. model.compile(loss='categorical_crossentropy', optimizer="adam",metrics=['accuracy'])
  70. print("Starting training ")
  71. # model.fit(train_x, train_y, batch_size=1024, epochs=400 + 4*int(len(train_x)/1000), shuffle=True)
  72. model.fit(train_x, train_y, batch_size=2048, epochs=900 + 9*int(len(train_x)/700), shuffle=True)
  73. score = model.evaluate(test_x, test_y)
  74. print(score)
  75. print('Test score:', score[0])
  76. print('Test accuracy:', score[1])
  77. model.save(model_name)
  78. # model=None
  79. # model=load_model(model_name)
  80. # result=model.predict(test_x)
  81. # print(result)
  82. # print(test_y)
  83. if __name__ == '__main__':
  84. # train(input_dim=176, result_class=5, file_path="D:\\data\\quantization\\stock6_5.log", model_name='5d_dnn_seq.h5')
  85. # train(input_dim=400, result_class=3, file_path="D:\\data\\quantization\\stock6.log", model_name='15m_dnn_seq.h5')
  86. resample('D:\\data\\quantization\\stock8_14.log')
  87. # mul_train()