TensorFlow Basics Notes (14): Saving and Restoring a Network Model with MNIST

   2023-02-09 学习力1030

http://blog.csdn.net/huachao1001/article/details/78502910

http://blog.csdn.net/u014432647/article/details/75276718

https://zhuanlan.zhihu.com/p/32887066

#coding:utf-8
#http://blog.csdn.net/zhuiqiuk/article/details/53376283
#http://blog.csdn.net/gan_player/article/details/77586489
from __future__ import absolute_import, unicode_literals
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import shutil
import os.path
from tensorflow.python.framework import graph_util

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def inference(input_image, keep_prob):
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(input_image, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    #keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])

    logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    return logits

def train(export_dir):
    mnist = input_data.read_data_sets("datasets", one_hot=True)

    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder("float", shape=[None, 784])
        y_ = tf.placeholder("float", shape=[None, 10])
        keep_prob = tf.placeholder("float")

        logits = inference(x, keep_prob)
        y_conv = tf.nn.softmax(logits)

        cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        sess = tf.Session()
        # initialize_all_variables is deprecated; tf.global_variables_initializer is the current API
        sess.run(tf.initialize_all_variables())

        

        for i in range(201):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(
                    {x: batch[0], y_: batch[1], keep_prob: 1.0}, sess)
                print "step %d, training accuracy %g" % (i, train_accuracy)
            train_step.run(
                {x: batch[0], y_: batch[1], keep_prob: 0.5}, sess)

        print "test accuracy %g" % accuracy.eval(
            {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}, sess)

        saver = tf.train.Saver()
        step = 200
        checkpoint_file = os.path.join(export_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)


def export_pb_model(model_name):
    # Rebuild the same network with named input/output nodes so the frozen graph
    # can later be fed and fetched by name.
    graph = tf.Graph()
    with graph.as_default():
        input_image = tf.placeholder("float", shape=[None, 28 * 28], name='inputdata')
        keep_prob = tf.placeholder("float", name='keep_probdata')
        logits = inference(input_image, keep_prob)
        y_conv = tf.nn.softmax(logits, name='outputdata')
        restore_saver = tf.train.Saver()

    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        # load the weights saved by train(); 'log' is the export_dir used above
        latest_ckpt = tf.train.latest_checkpoint('log')
        restore_saver.restore(sess, latest_ckpt)
        # fold the restored variables into constants, keeping everything needed
        # to compute the 'outputdata' node
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), ['outputdata'])

        # tf.train.write_graph(output_graph_def, 'log', model_name, as_text=False)
        with tf.gfile.GFile(model_name, "wb") as f:
            f.write(output_graph_def.SerializeToString())


def test_pb_model(model_name):
    mnist = input_data.read_data_sets("datasets", one_hot=True)

    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()
        output_graph_path = model_name
    #    sess.graph.add_to_collection("input", mnist.test.images)

        with open(output_graph_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            tf.import_graph_def(output_graph_def, name="")

        with tf.Session() as sess:

            # the frozen graph contains only constants, so this (deprecated) init is effectively a no-op
            tf.initialize_all_variables().run()
            input_x = sess.graph.get_tensor_by_name("inputdata:0")        
            output = sess.graph.get_tensor_by_name("outputdata:0")
            keep_prob = sess.graph.get_tensor_by_name("keep_probdata:0")

            y_conv_2 = sess.run(output,{input_x:mnist.test.images, keep_prob: 1.0})
            print( "y_conv_2", y_conv_2)

            # Test trained model
            #y__2 = tf.placeholder("float", [None, 10])
            y__2 = mnist.test.labels
            correct_prediction_2 = tf.equal(tf.argmax(y_conv_2, 1), tf.argmax(y__2, 1))
            print ("correct_prediction_2", correct_prediction_2 )
            accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, "float"))
            print ("accuracy_2", accuracy_2)

            print ("check accuracy %g" % accuracy_2.eval())


if __name__ == '__main__':
    export_dir = './log'
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)
    # train the network and save a ckpt checkpoint
    train(export_dir)
    model_name = os.path.join(export_dir, 'mnist.pb')
    # convert the ckpt checkpoint into a frozen pb model
    export_pb_model(model_name)
    # test the frozen pb model
    test_pb_model(model_name)
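
A side note on restoring straight from the checkpoint: the script above only reloads the ckpt inside export_pb_model() on its way to a frozen .pb. If you just want to reuse the trained weights, a tf.train.Saver can restore them into a freshly built graph. The snippet below is a minimal sketch of that, reusing inference() and the ./log checkpoint produced by train(); the function name test_ckpt_model is made up for illustration and is not part of the original script:

def test_ckpt_model(export_dir):
    mnist = input_data.read_data_sets("datasets", one_hot=True)
    graph = tf.Graph()
    with graph.as_default():
        # rebuild exactly the same graph structure that was trained
        x = tf.placeholder("float", shape=[None, 784])
        y_ = tf.placeholder("float", shape=[None, 10])
        keep_prob = tf.placeholder("float")
        logits = inference(x, keep_prob)
        y_conv = tf.nn.softmax(logits)
        correct = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, "float"))
        saver = tf.train.Saver()

    with tf.Session(graph=graph) as sess:
        # restore() overwrites every variable, so no explicit initialization is needed
        ckpt = tf.train.latest_checkpoint(export_dir)   # e.g. ./log/model.ckpt-200
        saver.restore(sess, ckpt)
        print("ckpt accuracy %g" % accuracy.eval(
            {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}, sess))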

 

$ python mymain.py

# train the network and save the ckpt checkpoint

Extracting datasets/train-images-idx3-ubyte.gz
Extracting datasets/train-labels-idx1-ubyte.gz
Extracting datasets/t10k-images-idx3-ubyte.gz
Extracting datasets/t10k-labels-idx1-ubyte.gz
2018-03-19 18:11:27.046638: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2018-03-19 18:11:27.169530: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2018-03-19 18:11:27.170178: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties:
name: GeForce GTX 1080 major: 6 minor: 1 memoryClockRate(GHz): 1.7335
pciBusID: 0000:01:00.0
totalMemory: 7.92GiB freeMemory: 5.51GiB
2018-03-19 18:11:27.170196: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1080, pci bus id: 0000:01:00.0, compute capability: 6.1)
WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py:107: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
step 0, training accuracy 0.08
step 100, training accuracy 0.86
step 200, training accuracy 0.96
2018-03-19 18:11:29.100338: W tensorflow/core/common_runtime/bfc_allocator.cc:217] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.32GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory is available.
test accuracy 0.9137

 

# convert the ckpt checkpoint into a frozen pb model

2018-03-19 18:11:30.655025: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1080, pci bus id: 0000:01:00.0, compute capability: 6.1)
Converted 8 variables to const ops.

 

# test the frozen pb model

Extracting datasets/train-images-idx3-ubyte.gz
Extracting datasets/train-labels-idx1-ubyte.gz
Extracting datasets/t10k-images-idx3-ubyte.gz
Extracting datasets/t10k-labels-idx1-ubyte.gz
2018-03-19 18:11:32.419375: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 0, name: GeForce GTX 1080, pci bus id: 0000:01:00.0, compute capability: 6.1)
(u'y_conv_2', array([[ 3.83061661e-06, 2.25869144e-06, 6.98342774e-05, ...,
9.99720514e-01, 5.38732929e-05, 6.28733032e-05],
[ 1.85461645e-03, 3.86392418e-03, 9.55442667e-01, ...,
1.31935649e-05, 2.71034874e-02, 4.14738406e-06],
[ 2.61329369e-05, 9.94501710e-01, 1.34233199e-03, ...,
8.23311449e-04, 1.73626456e-03, 3.27934940e-05],
...,
[ 1.58834242e-04, 1.02327869e-03, 9.29224771e-04, ...,
9.04104114e-03, 1.03222862e-01, 1.16873145e-01],
[ 9.86627676e-03, 1.02333550e-03, 2.13368423e-03, ...,
2.72160349e-03, 3.91508579e-01, 4.37955791e-03],
[ 1.95508893e-03, 2.17417346e-06, 1.18497398e-03, ...,
5.04385412e-07, 3.14567442e-05, 4.29990359e-06]], dtype=float32))
(u'correct_prediction_2', <tf.Tensor 'Equal:0' shape=(10000,) dtype=bool>)
(u'accuracy_2', <tf.Tensor 'Mean:0' shape=() dtype=float32>)
check accuracy 0.9137
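
Once frozen, the .pb file is self-contained: the graph structure and the trained weights travel together, and nothing needs to be initialized or restored before use. As a usage sketch (assuming the ./log/mnist.pb produced above and the same inputdata/keep_probdata/outputdata node names), classifying a single test image looks like this:

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("datasets", one_hot=True)

# read the frozen GraphDef back from disk
graph_def = tf.GraphDef()
with tf.gfile.GFile('./log/mnist.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    input_x = graph.get_tensor_by_name("inputdata:0")
    keep_prob = graph.get_tensor_by_name("keep_probdata:0")
    output = graph.get_tensor_by_name("outputdata:0")
    with tf.Session(graph=graph) as sess:
        image = mnist.test.images[0:1]               # shape (1, 784), already normalized
        probs = sess.run(output, {input_x: image, keep_prob: 1.0})
        print("predicted digit: %d" % np.argmax(probs, axis=1)[0])
        print("true digit:      %d" % np.argmax(mnist.test.labels[0]))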

 

 