Problem connecting VGG19 in TensorFlow

Andrew

I'm having trouble connecting up a VGG19 neural network. Here is the entire graph:

class VGG(object):
    def __init__(self, n_classes=252):

        self.input_data = tf.placeholder(dtype=tf.float32, name='input_data', shape=(5, 224, 224, 3))

        with tf.variable_scope("group1"):
            with tf.variable_scope("conv1"):
                self.weights1 = tf.get_variable('weights',[3, 3, 3, 64], initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias1 = tf.get_variable('bias', [64], initializer=tf.constant_initializer(0.0))
                self.convolve1 = tf.nn.conv2d(self.input_data, self.weights1, [1, 1, 1, 1], padding="SAME")
                self.conv1 = tf.nn.elu(tf.nn.bias_add(self.convolve1, self.bias1))
            with tf.variable_scope("conv2"):
                self.weights2= tf.get_variable('weights', [3, 3, 64, 64],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias2 = tf.get_variable('bias', [64], initializer=tf.constant_initializer(0.0))
                self.convolve2 = tf.nn.conv2d(self.conv1, self.weights2, [1, 1, 1, 1], padding="SAME")
                self.conv2 = tf.nn.elu(tf.nn.bias_add(self.convolve2, self.bias2))

            self.pool1 = tf.nn.max_pool(self.conv2, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool1')

        with tf.variable_scope("group2"):
            with tf.variable_scope("conv3"):
                self.weights3 = tf.get_variable('weights', [3, 3, 64, 128],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias3 = tf.get_variable('bias', [128], initializer=tf.constant_initializer(0.0))
                self.convolve3 = tf.nn.conv2d(self.pool1, self.weights3, [1, 1, 1, 1], padding="SAME")
                self.conv3 = tf.nn.elu(tf.nn.bias_add(self.convolve3, self.bias3))
            with tf.variable_scope("conv4"):
                self.weights4 = tf.get_variable('weights', [3, 3, 128, 128],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias4 = tf.get_variable('bias', [128], initializer=tf.constant_initializer(0.0))
                self.convolve4 = tf.nn.conv2d(self.conv3, self.weights4, [1, 1, 1, 1], padding="SAME")
                self.conv4 = tf.nn.elu(tf.nn.bias_add(self.convolve4, self.bias4))
            self.pool2 = tf.nn.max_pool(self.conv4, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool2')

        with tf.variable_scope("group3"):
            with tf.variable_scope("conv5"):
                self.weights5 = tf.get_variable('weights', [3, 3, 128, 256],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias5 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0))
                self.convolve5 = tf.nn.conv2d(self.pool2, self.weights5, [1, 1, 1, 1], padding="SAME")
                self.conv5 = tf.nn.elu(tf.nn.bias_add(self.convolve5, self.bias5))
            with tf.variable_scope("conv6"):
                self.weights6 = tf.get_variable('weights', [3, 3, 256, 256],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias6 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0))
                self.convolve6 = tf.nn.conv2d(self.conv5, self.weights6, [1, 1, 1, 1], padding="SAME")
                self.conv6 = tf.nn.elu(tf.nn.bias_add(self.convolve6, self.bias6))
            with tf.variable_scope("conv7"):
                self.weights7 = tf.get_variable('weights', [3, 3, 256, 256],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias7 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0))
                self.convolve7 = tf.nn.conv2d(self.conv6, self.weights7, [1, 1, 1, 1], padding="SAME")
                self.conv7 = tf.nn.elu(tf.nn.bias_add(self.convolve7, self.bias7))
            with tf.variable_scope("conv8"):
                self.weights8 = tf.get_variable('weights', [3, 3, 256, 256],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias8 = tf.get_variable('bias', [256], initializer=tf.constant_initializer(0.0))
                self.convolve8 = tf.nn.conv2d(self.conv7, self.weights8, [1, 1, 1, 1], padding="SAME")
                self.conv8 = tf.nn.elu(tf.nn.bias_add(self.convolve8, self.bias8))
            self.pool3 = tf.nn.max_pool(self.conv8, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool3')

        with tf.variable_scope("group4"):
            with tf.variable_scope("conv9"):
                self.weights9 = tf.get_variable('weights', [3, 3, 256, 512],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias9 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve9 = tf.nn.conv2d(self.pool3, self.weights9, [1, 1, 1, 1], padding="SAME")
                self.conv9 = tf.nn.elu(tf.nn.bias_add(self.convolve9, self.bias9))
            with tf.variable_scope("conv10"):
                self.weights10 = tf.get_variable('weights', [3, 3, 512, 512],
                                                initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias10 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve10 = tf.nn.conv2d(self.conv9, self.weights10, [1, 1, 1, 1], padding="SAME")
                self.conv10 = tf.nn.elu(tf.nn.bias_add(self.convolve10, self.bias10))
            with tf.variable_scope("conv11"):
                self.weights11 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias11 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve11 = tf.nn.conv2d(self.conv10, self.weights11, [1, 1, 1, 1], padding="SAME")
                self.conv11 = tf.nn.elu(tf.nn.bias_add(self.convolve11, self.bias11))
            with tf.variable_scope("conv12"):
                self.weights12 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias12 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve12 = tf.nn.conv2d(self.conv11, self.weights12, [1, 1, 1, 1], padding="SAME")
                self.conv12 = tf.nn.elu(tf.nn.bias_add(self.convolve12, self.bias12))
            self.pool4 = tf.nn.max_pool(self.conv12, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name="pool4")

        with tf.variable_scope("group5"):
            with tf.variable_scope("conv13"):
                self.weights13 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias13 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve13 = tf.nn.conv2d(self.pool4, self.weights13, [1, 1, 1, 1], padding="SAME")
                self.conv13 = tf.nn.elu(tf.nn.bias_add(self.convolve13, self.bias13))

            with tf.variable_scope("conv14"):
                self.weights14 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias14 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve14 = tf.nn.conv2d(self.conv13, self.weights14, [1, 1, 1, 1], padding="SAME")
                self.conv14 = tf.nn.elu(tf.nn.bias_add(self.convolve14, self.bias14))
            with tf.variable_scope("conv15"):
                self.weights15 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias15 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve15 = tf.nn.conv2d(self.conv14, self.weights15, [1, 1, 1, 1], padding="SAME")
                self.conv15 = tf.nn.elu(tf.nn.bias_add(self.convolve15, self.bias15))
            with tf.variable_scope("conv16"):
                self.weights16 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias16 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve16 = tf.nn.conv2d(self.conv15, self.weights16, [1, 1, 1, 1], padding="SAME")
                self.conv16 = tf.nn.elu(tf.nn.bias_add(self.convolve16, self.bias16))
            self.pool5 = tf.nn.max_pool(self.conv16, [1, 2, 2, 1], [1, 2, 2, 1], "SAME", name="pool5")

        with tf.variable_scope("group6"):
            self.fc6 = tf.reshape(self.pool5, [-1, 4096], 'fc6')
            with tf.variable_scope("fc7"):
                self.weights17 = tf.get_variable('weights', [4096, 4096],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias17 = tf.get_variable('bias', [4096], initializer=tf.constant_initializer(0.0))
                self.fc7 = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.fc6, self.weights17), self.bias17))
            with tf.variable_scope("logits"):
                self.weights17 = tf.get_variable('weights', [4096, n_classes],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias17 = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0))
                self.logits = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.fc6, self.weights17), self.bias17))

        self.softmax = tf.nn.softmax(self.logits, 'softmax')

I have read and re-read the paper and searched through other TensorFlow implementations, and at this point I have no idea what is causing it. I get a wrong-shape error on the reshape from self.pool5 to self.fc6; self.pool5 ends up with shape (?, 7, 7, 512). Any help would be greatly appreciated.
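A quick back-of-the-envelope check (a minimal sketch, assuming the (5, 224, 224, 3) placeholder defined above) shows why the reshape to [-1, 4096] cannot succeed:

# With 'SAME' padding and stride 2, each of the five max pools halves the
# spatial size: 224 -> 112 -> 56 -> 28 -> 14 -> 7, so pool5 has shape
# (5, 7, 7, 512).
per_image = 7 * 7 * 512        # 25088 values per image after pool5
batch_total = 5 * per_image    # 125440 values in the whole pool5 tensor
print(batch_total % 4096)      # 2560 -> not a multiple of 4096, so
                               # tf.reshape(pool5, [-1, 4096]) must fail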

Thanks.

mc室

I think your problem is that you are trying to reshape pool5 to the size of the fully connected layer, 4096. But those 4096 units are just the number of neurons the fc layer has, not the size of its input. I have tried to change your code accordingly:

        with tf.variable_scope("conv16"):
                self.weights16 = tf.get_variable('weights', [3, 3, 512, 512],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias16 = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.0))
                self.convolve16 = tf.nn.conv2d(self.conv15, self.weights16, [1, 1, 1, 1], padding="SAME")
                self.conv16 = tf.nn.elu(tf.nn.bias_add(self.convolve16, self.bias16))
            self.pool5 = tf.nn.max_pool(self.conv16, [1, 2, 2, 1], [1, 2, 2, 1], "SAME", name="pool5")
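            # 25088 = 7 * 7 * 512: the flattened size of one pool5 feature map for a 224x224 input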
            self.pool5_flatten = tf.reshape(self.pool5, [-1, 25088])

        with tf.variable_scope("group6"):
            with tf.variable_scope("fc7"):
                self.weights17 = tf.get_variable('weights', [25088, 4096],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias17 = tf.get_variable('bias', [4096], initializer=tf.constant_initializer(0.0))
                self.fc7 = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.pool5_flatten, self.weights17), self.bias17))
            with tf.variable_scope("logits"):
                self.weights17 = tf.get_variable('weights', [4096, n_classes],
                                                 initializer=tf.random_normal_initializer(stddev=1e-2))
                self.bias17 = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0))
                self.logits = tf.nn.elu(tf.nn.bias_add(tf.matmul(self.fc7, self.weights17), self.bias17))

        self.softmax = tf.nn.softmax(self.logits, name='softmax')  # pass the name as a keyword; the second positional argument is the axis
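As a side note, the 25088 can also be derived from pool5's static shape instead of being hardcoded, and the corrected graph is easy to smoke-test with a random batch. Below is a minimal sketch, assuming TF 1.x and the fixed VGG class above; flat_size is a hypothetical helper introduced here for illustration:

import numpy as np
import tensorflow as tf


def flat_size(tensor):
    """Values per example, e.g. 7 * 7 * 512 = 25088 for pool5 here."""
    return int(np.prod(tensor.get_shape().as_list()[1:]))


tf.reset_default_graph()
model = VGG(n_classes=252)
print(flat_size(model.pool5))   # 25088: the first dimension of the fc7 weights

# Run one random batch through the graph to confirm the shapes line up.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(5, 224, 224, 3).astype(np.float32)
    probs = sess.run(model.softmax, feed_dict={model.input_data: batch})
    print(probs.shape)          # (5, 252)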
