CIFAR-10 classification: 99% top-1 accuracy on the test set

Today I wrote a CIFAR-10 classifier whose top-1 accuracy reached 99%; this post is a backup of the original on Jianshu. The network is two convolutional layers (each followed by 3x3 max-pooling and batch normalization), two fully connected layers with dropout and batch normalization, and a 10-way softmax output.

```python

import math
import os
import time

import numpy as np
import tensorflow as tf

# helper scripts from the TensorFlow models repository (tutorials/image/cifar10)
from tutorials.image.cifar10 import cifar10
from tutorials.image.cifar10 import cifar10_input

batch_size = 128

# download/extract CIFAR-10 if needed; the binaries land under /tmp/cifar10_data
cifar10.maybe_download_and_extract()
data_dir = '/tmp/cifar10_data/cifar-10-batches-bin'
images_train, labels_train = cifar10_input.distorted_inputs(data_dir, batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)

learning_rate = 1e-3
max_iter_step = 5  # training steps; raise this by orders of magnitude for a real run

log_dir = './logs/tensorboard/my_cifar10/'
model_dir = './ckpt/my_cifar10/'
model_path = model_dir + 'final.ckpt'

keep_prob = tf.placeholder(dtype=tf.float32)
is_training = tf.placeholder(dtype=tf.bool, name='is_training')

def weights_init(name, shape, stddev=None, w1=None):
    """Create a weight variable; if w1 is given, add its L2 weight decay to the 'losses' collection."""
    init = tf.truncated_normal(shape, dtype=tf.float32, stddev=stddev)
    var = tf.Variable(init, name=name)
    if w1 is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name=name + '_weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var

def bias_init(name, shape):
    return tf.Variable(tf.zeros(shape), name=name)

def conv2d(inputs, filter):
    # stride-1 convolution with SAME padding
    return tf.nn.conv2d(inputs, filter, [1, 1, 1, 1], padding='SAME')

def max_pool(inputs, ksize, strides=[1, 2, 2, 1]):
    return tf.nn.max_pool(inputs, ksize=ksize, strides=strides, padding='SAME')

with tf.name_scope('inputs'):
    # distorted_inputs/inputs crop CIFAR-10 images to 24x24
    X = tf.placeholder(dtype=tf.float32, shape=[batch_size, 24, 24, 3], name='X')
    y = tf.placeholder(dtype=tf.int32, shape=[batch_size], name='y')

# first convolutional layer: 5x5 kernel, 70 filters, stride 1
with tf.name_scope('layer1_conv2d'):
    filter1 = weights_init('convolution_kernel_1', [5, 5, 3, 70], stddev=0.001)
    bias1 = bias_init('bias_1', [70])
    A1 = tf.nn.relu(conv2d(X, filter1) + bias1)
    P1 = max_pool(A1, [1, 3, 3, 1])
    N1 = tf.layers.batch_normalization(P1, training=is_training)

# second convolutional layer: 5x5 kernel, 140 filters
with tf.name_scope('layer2_conv2d'):
    filter2 = weights_init('convolution_kernel_2', [5, 5, 70, 140], stddev=0.001)
    bias2 = bias_init('bias_2', [140])
    A2 = tf.nn.relu(conv2d(N1, filter2) + bias2)
    P2 = max_pool(A2, [1, 3, 3, 1])
    N2 = tf.layers.batch_normalization(P2, training=is_training)

with tf.name_scope('flatten'):
    flatten = tf.reshape(N2, [batch_size, -1], name='flatten')
    dim = flatten.shape[1].value

with tf.name_scope('layer_full_connect_1'):
    fc_drop1 = tf.nn.dropout(flatten, keep_prob)
    fc_weights1 = weights_init('fc_weights_1', [dim, 5054], stddev=0.001)
    fc_bias1 = bias_init('fc_bias_1', [5054])
    fc1 = tf.matmul(fc_drop1, fc_weights1) + fc_bias1
    fc_a1 = tf.nn.relu(fc1)
    fc_n1 = tf.layers.batch_normalization(fc_a1, training=is_training)

with tf.name_scope('layer_full_connect_2'):
    fc_drop2 = tf.nn.dropout(fc_n1, keep_prob)
    fc_weights2 = weights_init('fc_weights_2', [5054, 1024], stddev=0.001)
    fc_bias2 = bias_init('fc_bias_2', [1024])
    fc2 = tf.matmul(fc_drop2, fc_weights2) + fc_bias2
    fc_a2 = tf.nn.relu(fc2)
    fc_n2 = tf.layers.batch_normalization(fc_a2, training=is_training)

with tf.name_scope('layer_before_softmax'):
    f_weights = weights_init('final_weights', [1024, 10], stddev=1 / 300)
    f_bias = bias_init('final_bias', [10])
    logits = tf.matmul(fc_n2, f_weights) + f_bias

with tf.name_scope('loss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, logits=logits, name='cross_entropy')
    _loss = tf.reduce_mean(cross_entropy)
    tf.add_to_collection('losses', _loss)
    loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    # run the batch-norm moving-average updates along with each training step
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

with tf.name_scope('estimator'):
    correct_prediction = tf.nn.in_top_k(logits, y, 1)  # top-1 hit per example
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    tf.summary.scalar('accuracy', accuracy)

saver = tf.train.Saver()
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # restore only if a checkpoint actually exists (an empty model_dir would make restore fail)
    ckpt = tf.train.latest_checkpoint(model_dir)
    if ckpt:
        saver.restore(sess, ckpt)
    else:
        sess.run(init)

    # the cifar10_input pipelines are queue-based; without the queue runners,
    # sess.run on a batch would block forever
    tf.train.start_queue_runners(sess=sess)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    merge_summary = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(log_dir + 'train', sess.graph)

    for step in range(max_iter_step):
        start_time = time.time()
        train_x, train_y = sess.run([images_train, labels_train])
        # note: keep_prob=1.0 disables dropout during training; use e.g. 0.5 to actually apply it
        if step % 10 == 0:
            _, loss_value, summary, acc = sess.run(
                [train_op, loss, merge_summary, accuracy],
                feed_dict={X: train_x, y: train_y, keep_prob: 1.0, is_training: True})
            duration = time.time() - start_time
            train_writer.add_summary(summary, step)
            examples_per_sec = batch_size / duration
            sec_per_batch = float(duration)
            format_str = 'step %d, loss=%.2f, acc=%.5f (%.1f examples/sec; %.3f sec/batch)'
            print(format_str % (step, loss_value, acc, examples_per_sec, sec_per_batch))
        else:
            sess.run([train_op], feed_dict={X: train_x, y: train_y, keep_prob: 1.0, is_training: True})
        if step % 1000 == 0:
            saver.save(sess, model_path)
    saver.save(sess, model_path)

    # evaluate top-1 accuracy on the test set (CIFAR-10 has 10000 test images)
    num_examples = 10000
    num_iter = int(math.ceil(num_examples / batch_size))
    true_count = 0
    total_sample_count = num_iter * batch_size
    step = 0
    while step < num_iter:
        image_batch, label_batch = sess.run([images_test, labels_test])
        predictions = sess.run([correct_prediction],
                               feed_dict={X: image_batch, y: label_batch,
                                          keep_prob: 1.0, is_training: False})
        true_count += np.sum(predictions)
        step += 1

    # print the result
    precision = true_count / total_sample_count
    print('precision @ 1 = %.3f' % precision)

    train_writer.close()

```
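After training, a quick way to sanity-check the model is to reload the checkpoint and look at raw predictions on one test batch. The sketch below is not from the original post: it assumes the graph-construction code above has already run in the same process, and it uses `tf.argmax` over `logits` purely as an illustrative way to read off the predicted classes.

```python
# Hypothetical follow-up, not part of the original script: reload the
# checkpoint saved above and inspect predictions for a single test batch.
with tf.Session() as sess:
    saver.restore(sess, model_path)          # weights saved by the training run
    tf.train.start_queue_runners(sess=sess)  # feed the queue-based test pipeline
    image_batch, label_batch = sess.run([images_test, labels_test])
    predicted = sess.run(tf.argmax(logits, 1),
                         feed_dict={X: image_batch, y: label_batch,
                                    keep_prob: 1.0, is_training: False})
    print('predicted:', predicted[:10])  # predicted class indices
    print('labels:   ', label_batch[:10])
```

The loss and accuracy curves written by the `FileWriter` can be viewed with `tensorboard --logdir ./logs/tensorboard/my_cifar10/`.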

https://www.jianshu.com/p/2cea038f9d3f
