Optimize computation speed, change the camera-reading logic, update the classifier.
@@ -29,16 +29,22 @@ def max_pool_2x2(x):
 CONV1_KERNAL_SIZE = 5
 
 # Number of output channels of conv layer 1
-CONV1_OUTPUT_CHANNELS = 8
+CONV1_OUTPUT_CHANNELS = 4
 
 # Kernel size of conv layer 2
 CONV2_KERNAL_SIZE = 3
 
 # Number of output channels of conv layer 2
-CONV2_OUTPUT_CHANNELS = 16
+CONV2_OUTPUT_CHANNELS = 6
 
+# Kernel size of conv layer 3
+CONV3_KERNAL_SIZE = 3
+
+# Number of output channels of conv layer 3
+CONV3_OUTPUT_CHANNELS = 8
+
 # Width of fully connected layer 1
-FC1_OUTPUT_NODES = 100
+FC1_OUTPUT_NODES = 50
 
 # Width of fully connected layer 2 (number of output label classes)
 FC2_OUTPUT_NODES = 15
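Not part of the diff, for context only: shrinking the channel counts (8 to 4, 16 to 6) and the FC1 width (100 to 50) is presumably where the advertised speed-up comes from. A rough sketch of the kernel-weight arithmetic, assuming a single-channel input image (the real input depth is defined elsewhere in the file):

def conv_weights(k, c_in, c_out):
    # k x k kernel, c_in input channels, c_out output channels (biases ignored)
    return k * k * c_in * c_out

# Old trunk: two conv layers, 8 then 16 channels
old = conv_weights(5, 1, 8) + conv_weights(3, 8, 16)                          # 200 + 1152 = 1352
# New trunk: three conv layers, 4, 6 then 8 channels
new = conv_weights(5, 1, 4) + conv_weights(3, 4, 6) + conv_weights(3, 6, 8)   # 100 + 216 + 432 = 748
print(old, new)

Even with the extra third conv layer, the kernel-weight count roughly halves, and halving FC1_OUTPUT_NODES cuts the largest dense weight matrix in half as well.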
@@ -49,6 +55,7 @@ OUTPUT_NODES = FC2_OUTPUT_NODES
 
 def forward(x, regularizer=None, keep_rate=tf.constant(1.0)):
     vars = []
+    vars_name = []
     nodes = []
 
     conv1_w = get_weight(
@@ -57,7 +64,10 @@ def forward(x, regularizer=None, keep_rate=tf.constant(1.0)):
     conv1_b = get_bias([CONV1_OUTPUT_CHANNELS])
     conv1 = tf.nn.relu(tf.nn.bias_add(conv2d(x, conv1_w), conv1_b))
     pool1 = avg_pool_2x2(conv1)
+    print("conv1: ", conv1.shape)
+    print("pool1: ", pool1.shape)
     vars.extend([conv1_w, conv1_b])
+    vars_name.extend(["conv1_w", "conv1_b"])
     nodes.extend([conv1, pool1])
 
     conv2_w = get_weight(
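The forward pass relies on get_weight, get_bias, conv2d and avg_pool_2x2, which are defined earlier in the file and not shown in this diff. A minimal sketch of what such TF1-style helpers typically look like, included here only so the excerpt reads on its own; the repository's actual definitions (initializers, padding, regularizer handling) may differ:

import tensorflow as tf

def get_weight(shape, regularizer=None):
    # Truncated-normal init; optionally register an L2 penalty in the 'losses' collection.
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection("losses", regularizer(w))
    return w

def get_bias(shape):
    return tf.Variable(tf.zeros(shape))

def conv2d(x, w):
    # SAME padding, stride 1 in both spatial dimensions.
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

def avg_pool_2x2(x):
    # 2x2 average pooling with stride 2, halving each spatial dimension.
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")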
@@ -66,27 +76,41 @@ def forward(x, regularizer=None, keep_rate=tf.constant(1.0)):
     conv2_b = get_bias([CONV2_OUTPUT_CHANNELS])
     conv2 = tf.nn.relu(tf.nn.bias_add(conv2d(pool1, conv2_w), conv2_b))
     pool2 = avg_pool_2x2(conv2)
+    print("conv2: ", conv2.shape)
+    print("pool2: ", pool2.shape)
     vars.extend([conv2_w, conv2_b])
+    vars_name.extend(["conv2_w", "conv2_b"])
     nodes.extend([conv2, pool2])
 
-    pool_shape = pool2.get_shape().as_list()
-    node = pool_shape[1] * pool_shape[2] * pool_shape[3]
-    reshaped = tf.reshape(pool2, [-1, node])
+    conv3_w = get_weight(
+        [CONV3_KERNAL_SIZE, CONV3_KERNAL_SIZE, CONV2_OUTPUT_CHANNELS, CONV3_OUTPUT_CHANNELS]
+    )
+    conv3_b = get_bias([CONV3_OUTPUT_CHANNELS])
+    conv3 = tf.nn.relu(tf.nn.bias_add(conv2d(pool2, conv3_w), conv3_b))
+    print("conv3: ", conv3.shape)
+    vars.extend([conv3_w, conv3_b])
+    vars_name.extend(["conv3_w", "conv3_b"])
+    nodes.extend([conv3])
+
+    conv_shape = conv3.get_shape().as_list()
+    node = conv_shape[1] * conv_shape[2] * conv_shape[3]
+    reshaped = tf.reshape(conv3, [-1, node])
     reshaped = tf.nn.dropout(reshaped, keep_rate)
     print("reshaped: ", reshaped.shape)
 
     fc1_w = get_weight([node, FC1_OUTPUT_NODES], regularizer)
     fc1_b = get_bias([FC1_OUTPUT_NODES])
     fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
     fc1 = tf.nn.dropout(fc1, keep_rate)
     vars.extend([fc1_w, fc1_b])
+    vars_name.extend(["fc1_w", "fc1_b"])
     nodes.extend([fc1])
 
     fc2_w = get_weight([FC1_OUTPUT_NODES, FC2_OUTPUT_NODES], regularizer)
     fc2_b = get_bias([FC2_OUTPUT_NODES])
     # fc2 = tf.nn.softmax(tf.matmul(fc1, fc2_w) + fc2_b)
     fc2 = tf.matmul(fc1, fc2_w) + fc2_b
     vars.extend([fc2_w, fc2_b])
+    vars_name.extend(["fc2_w", "fc2_b"])
     nodes.extend([fc2])
 
-    return nodes, vars
+    return nodes, vars, vars_name
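Not part of the diff: with the new return value, callers receive the variable names alongside the variables, which makes it easy to build a name-keyed tf.train.Saver. A hypothetical usage sketch under TF1 semantics; the module name forward_net, the 64x64x1 input shape, and the placeholder setup are assumptions, not taken from the commit:

import tensorflow as tf
import forward_net  # hypothetical module name for the file changed above

x = tf.placeholder(tf.float32, [None, 64, 64, 1])  # assumed input resolution
nodes, variables, variable_names = forward_net.forward(x)

# The third return value lets us key a Saver by readable names.
saver = tf.train.Saver({name: var for name, var in zip(variable_names, variables)})

logits = nodes[-1]             # fc2: raw class scores; softmax is left to the loss/inference step
probs = tf.nn.softmax(logits)  # FC2_OUTPUT_NODES == 15 classes

During training, keep_rate would be fed a value below 1.0 so that the two tf.nn.dropout calls actually drop activations.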