Browse Source

模型修正

yufeng 4 years ago
parent
commit
caa4bbae0c
1 changed file with 15 additions and 16 deletions
  1. 15 16
      mix/mix_train.py

+ 15 - 16
mix/mix_train.py

@@ -19,7 +19,7 @@ from keras.models import Model
19
 def read_data(path):
19
 def read_data(path):
20
     lines = []
20
     lines = []
21
     with open(path) as f:
21
     with open(path) as f:
22
-        for x in range(80000): #380000
22
+        for x in range(300000): #380000
23
             lines.append(eval(f.readline().strip()))
23
             lines.append(eval(f.readline().strip()))
24
 
24
 
25
     random.shuffle(lines)
25
     random.shuffle(lines)
@@ -74,18 +74,17 @@ def create_cnn(width, height, depth, filters=(4, 6), kernel_size=(5, 6), regress
74
     # define the model input
74
     # define the model input
75
     inputs = Input(shape=inputShape)
75
     inputs = Input(shape=inputShape)
76
 
76
 
77
-    # loop over the number of filters
78
-    for (i, f) in enumerate(filters):
79
-        # if this is the first CONV layer then set the input
80
-        # appropriately
81
-        if i == 0:
82
-            x = inputs
77
+    x = inputs
83
 
78
 
84
-        # CONV => RELU => BN => POOL
85
-        x = Conv2D(f, kernel_size, padding="same")(x)
86
-        x = Activation("relu")(x)
87
-        x = BatchNormalization(axis=chanDim)(x)
88
-        # x = MaxPooling2D(pool_size=(2, 2))(x)
79
+    # CONV => RELU => BN => POOL
80
+    x = Conv2D(32, kernel_size, strides=2, padding="same")(x)
81
+    x = Activation("relu")(x)
82
+    x = BatchNormalization(axis=chanDim)(x)
83
+    # x = MaxPooling2D(pool_size=(2, 2))(x)
84
+    # if width > 2:
85
+    #     x = Conv2D(32, (10, 6), padding="same")(x)
86
+    #     x = Activation("relu")(x)
87
+    #     x = BatchNormalization(axis=chanDim)(x)
89
 
88
 
90
     # flatten the volume, then FC => RELU => BN => DROPOUT
89
     # flatten the volume, then FC => RELU => BN => DROPOUT
91
     x = Flatten()(x)
90
     x = Flatten()(x)
@@ -112,8 +111,8 @@ def create_cnn(width, height, depth, filters=(4, 6), kernel_size=(5, 6), regress
112
 
111
 
113
 # create the MLP and CNN models
112
 # create the MLP and CNN models
114
 mlp = create_mlp(train_x_c.shape[1], regress=False)
113
 mlp = create_mlp(train_x_c.shape[1], regress=False)
115
-cnn_0 = create_cnn(18, 18, 1, kernel_size=(6, 6), regress=False, output=256)
116
-cnn_1 = create_cnn(18, 2, 1, kernel_size=(6,2), regress=False, output=36)
114
+cnn_0 = create_cnn(18, 18, 1, kernel_size=(10, 6), regress=False, output=256)
115
+cnn_1 = create_cnn(18, 2, 1, kernel_size=(10,2), regress=False, output=36)
117
 
116
 
118
 # create the input to our final set of layers as the *output* of both
117
 # create the input to our final set of layers as the *output* of both
119
 # the MLP and CNN
118
 # the MLP and CNN
@@ -149,8 +148,8 @@ model.fit(
149
     [train_x_c, train_x_a, train_x_b], train_y,
148
     [train_x_c, train_x_a, train_x_b], train_y,
150
     # validation_data=([testAttrX, testImagesX], testY),
149
     # validation_data=([testAttrX, testImagesX], testY),
151
     # epochs=int(3*train_x_a.shape[0]/1300),
150
     # epochs=int(3*train_x_a.shape[0]/1300),
152
-    epochs=120,
153
-    batch_size=4096, shuffle=True)
151
+    epochs=100,
152
+    batch_size=2048, shuffle=True)
154
 
153
 
155
 test_x_a = test_x[:,:18*18]
154
 test_x_a = test_x[:,:18*18]
156
 test_x_a = test_x_a.reshape(test_x.shape[0], 18, 18, 1)
155
 test_x_a = test_x_a.reshape(test_x.shape[0], 18, 18, 1)