train_jqxx2.py

#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Simple per-stock classifier training: logistic regression, linear SVM,
random forest, and gradient boosting baselines.
'''
import sys
import os
sys.path.append(os.path.abspath('..'))
from util.config import config
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from imblearn.over_sampling import RandomOverSampler
import joblib

def curce_data(x, y, y_pred):
    # Serialize aligned (x, true y, predicted y) triples as "x,y,y_pred"
    # CSV lines; y and y_pred are expected to be column vectors.
    x = x.tolist()
    y = y.tolist()
    y_pred = y_pred.tolist()
    results = zip(x, y, y_pred)
    results = ["{},{},{}".format(s[0], s[1][0], s[2][0]) for s in results]
    return results
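
# A hypothetical usage sketch for curce_data (the helper and file name are
# assumptions, not part of the original pipeline): dump the formatted lines
# to disk for later plotting.
def save_curve(x, y, y_pred, path='curve.csv'):
    with open(path, 'w') as f:
        f.write('\n'.join(curce_data(x, y, y_pred)))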

def read_data(path):
    # Each line of the input file is a Python literal tuple (X, z, y);
    # the middle field z is read but not used.
    with open(path) as f:
        lines = f.readlines()
    lines = [eval(line.strip()) for line in lines]
    X, z, y = zip(*lines)
    X = np.array(X)
    y = np.array(y)
    return X, y
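
# A purely hypothetical example of one input line, matching only the
# (features, meta, label) layout that read_data unpacks; the real field
# contents are not shown in this repo:
#   ([0.5, 1.2, 0.3], '2019-01-04', 1)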

def demo(file, model_file):
    X_train, y_train = read_data(file)
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(X_train, y_train, test_size=0.3)
    # Random oversampling of the minority class, applied to the training
    # split only so the test split stays untouched.
    ros = RandomOverSampler(random_state=22)
    X_resampled, y_resampled = ros.fit_resample(Xtrain, Ytrain)
    # A logistic regression model object; its members hold w and b, which
    # start out random before training.
    model = LogisticRegression(max_iter=1200)
    # Calling fit() keeps searching for suitable w and b until the error
    # is minimized.
    model.fit(X_resampled, y_resampled)
    # Print w
    # print(model.coef_)
    # Print b
    print(model.intercept_)
    # The model is trained; optionally check its fit on the training set.
    # y_pred_train = model.predict(Xtrain)
    # # sklearn MSE on the training set:
    # #   Ytrain       -- true y values on the training set
    # #   y_pred_train -- y values predicted by the model
    # #   computes sum((Ytrain - y_pred_train)^2) / n
    # train_mse = metrics.mean_squared_error(Ytrain, y_pred_train)
    # print("train accuracy:", accuracy_score(Ytrain, y_pred_train))
    #
    # Check performance on the held-out test set.
    y_pred_test = model.predict(Xtest)
    print("test accuracy:", accuracy_score(Ytest, y_pred_test))
    # Save the model.
    joblib.dump(model, model_file)
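
# A minimal inference sketch (not in the original): reload a classifier
# saved by any of the demos and score new samples. X_new is assumed to
# have the same feature layout as the training data.
def load_and_predict(model_file, X_new):
    model = joblib.load(model_file)
    return model.predict(X_new)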

def demo_1(file, model_file):
    X_train, y_train = read_data(file)
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(X_train, y_train, test_size=0.25)
    # Random oversampling. Resample only the training split; oversampling
    # before the split would leak test samples into training.
    ros = RandomOverSampler(random_state=22)
    X_resampled, y_resampled = ros.fit_resample(Xtrain, Ytrain)
    model = SVC(kernel='linear')
    # Calling fit() keeps searching for suitable w and b until the error
    # is minimized.
    model.fit(X_resampled, y_resampled)
    # Print w
    # print(model.coef_)
    # Print b
    print(model.intercept_)
    # The model is trained; optionally check its fit on the training set.
    # y_pred_train = model.predict(Xtrain)
    # # sklearn MSE on the training set:
    # #   Ytrain       -- true y values on the training set
    # #   y_pred_train -- y values predicted by the model
    # #   computes sum((Ytrain - y_pred_train)^2) / n
    # train_mse = metrics.mean_squared_error(Ytrain, y_pred_train)
    # print("train accuracy:", accuracy_score(Ytrain, y_pred_train))
    #
    # Check performance on the held-out test set.
    # y_pred_test = model.predict(Xtest)
    # print("test accuracy:", accuracy_score(Ytest, y_pred_test))
    # Save the model.
    joblib.dump(model, model_file)
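
# A small diagnostic sketch (not in the original): compare class counts
# before and after RandomOverSampler to see the imbalance it corrects.
def show_balance(y_before, y_after):
    # np.unique with return_counts=True yields per-class sample counts.
    print('before:', dict(zip(*np.unique(y_before, return_counts=True))))
    print('after: ', dict(zip(*np.unique(y_after, return_counts=True))))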

def demo_2(file, model_file):
    X_train, y_train = read_data(file)
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(X_train, y_train, test_size=0.25)
    rfc = RandomForestClassifier(random_state=0, n_estimators=20, max_depth=4)
    rfc = rfc.fit(Xtrain, Ytrain)
    print(rfc.score(Xtest, Ytest))
    # Check performance on the held-out test set.
    y_pred_test = rfc.predict(Xtest)
    print("test accuracy:", accuracy_score(Ytest, y_pred_test))
    # Save the model.
    joblib.dump(rfc, model_file)
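
# An inspection sketch (not in the original): print the impurity-based
# feature importances that a fitted RandomForestClassifier exposes.
def show_importances(rfc):
    for i, imp in enumerate(rfc.feature_importances_):
        print('feature {}: {:.4f}'.format(i, imp))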

def demo_3(file, model_file):
    X_train, y_train = read_data(file)
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(X_train, y_train, test_size=0.3)
    gbm2 = GradientBoostingClassifier(n_estimators=320, max_depth=6, learning_rate=0.7,
                                      max_features='sqrt', random_state=10)
    gbm2 = gbm2.fit(Xtrain, Ytrain)
    print(gbm2.score(Xtest, Ytest))
    # Check performance on the held-out test set.
    y_pred_test = gbm2.predict(Xtest)
    print("test accuracy:", accuracy_score(Ytest, y_pred_test))
    # Save the model.
    joblib.dump(gbm2, model_file)
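
# A tuning sketch (not in the original) using sklearn's staged_predict to
# watch test accuracy as boosting stages accumulate; useful given the high
# n_estimators=320 and learning_rate=0.7 chosen above.
def staged_accuracy(gbm, Xtest, Ytest, every=40):
    for i, y_stage in enumerate(gbm.staged_predict(Xtest), start=1):
        if i % every == 0:
            print('stage {}: {:.4f}'.format(i, accuracy_score(Ytest, y_stage)))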

if __name__ == '__main__':
    root_dir = 'D:\\data\\quantization\\jqxx2\\'
    model_dir = 'D:\\data\\quantization\\jqxx2_svm_model\\'
    m = '000001.SH.log'  # 12
    demo_1(root_dir + m, model_dir + m[:6] + '.pkl')
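    # A hypothetical batch variant (commented out): train one model per
    # 6-digit code by looping over every .log file in root_dir.
    # for name in os.listdir(root_dir):
    #     if name.endswith('.log'):
    #         demo_1(root_dir + name, model_dir + name[:6] + '.pkl')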