import tensorflow as tf
import numpy as np
import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
print("MNIST ready")

n_input = 784   # 28*28 grayscale image, 784 pixels
n_output = 10   # 10-class classification problem

# Weights
weights = {
    # conv1: [3, 3, 1, 32] are the filter height, filter width, input depth, and number of filters (= number of output feature maps)
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.1)),
    # conv2: 3, 3 as above; 32 is the input depth (the number of feature maps coming from conv1), 64 is the number of output feature maps
    'wc2': tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.1)),
    # fc1: flattens the feature maps into a vector; the width 1024 is a user-chosen value
    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024], stddev=0.1)),
    # fc2: the 10-way classifier, 1024 inputs and 10 outputs
    'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
"""
Feature map size calculation:
f_w = (w - f + 2*pad)/s + 1 = (28 - 3 + 2*1)/1 + 1 = 28   # the convolution layers do not change the spatial size
f_h = (h - f + 2*pad)/s + 1 = (28 - 3 + 2*1)/1 + 1 = 28
The spatial size only shrinks in the pooling layers:
after the first pooling, 28*28 becomes 14*14;
after the second pooling, 14*14 becomes 7*7, so the final feature map is 7*7*64.
"""
# Biases
biases = {
    'bc1': tf.Variable(tf.random_normal([32], stddev=0.1)),        # conv1, one bias per feature map (32)
    'bc2': tf.Variable(tf.random_normal([64], stddev=0.1)),        # conv2, one bias per feature map (64)
    'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),      # fc1, 1024 units
    'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))   # fc2, 10 outputs
}

def conv_basic(_input, _w, _b, _keep_prob):
    # INPUT
    # Reshape the image into the layout TF expects, [n, h, w, c]; -1 lets TF infer that dimension from the other three
    _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])
    # CONV LAYER 1
    # strides=[1, 1, 1, 1] are the strides along batch, h, w and c
    # padding is either 'SAME' (zero-pad when the window runs past the edge) or 'VALID' (drop incomplete windows)
    _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')
    _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1']))  # activation after the convolution
    # Max pooling; in ksize=[1, 2, 2, 1] the 1s correspond to batch and channel, the 2s to a 2*2 pooling window
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Randomly drop some neurons; _keep_prob is the fraction of neurons kept, e.g. 0.6
    _pool_dr1 = tf.nn.dropout(_pool1, _keep_prob)
    # CONV LAYER 2
    _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME')
    _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2']))
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr2 = tf.nn.dropout(_pool2, _keep_prob)  # dropout
    # VECTORIZE
    # Flatten pool2's output into a vector so it can feed the fully connected layer
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
    # FULLY CONNECTED LAYER 1
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))  # w*x + b, then ReLU
    _fc_dr1 = tf.nn.dropout(_fc1, _keep_prob)  # dropout
    # FULLY CONNECTED LAYER 2
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])  # w*x + b gives the class scores (logits)
    # RETURN
    out = {'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1, 'pool_dr1': _pool_dr1,
           'conv2': _conv2, 'pool2': _pool2, 'pool_dr2': _pool_dr2, 'dense1': _dense1,
           'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out}
    return out

print("CNN READY")

x = tf.placeholder(tf.float32, [None, n_input])    # placeholder; the number of samples is left as None
y = tf.placeholder(tf.float32, [None, n_output])   # placeholder; the number of samples is left as None
keep_prob = tf.placeholder(tf.float32)

_pred = conv_basic(x, weights, biases, keep_prob)['out']  # forward-pass prediction (logits)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))  # cross-entropy loss
optm = tf.train.AdamOptimizer(0.001).minimize(cost)  # Adam optimizer
_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1))  # True where the predicted index matches the label index, else False
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))  # cast True/False to 1/0 and average over all predictions
init = tf.global_variables_initializer()
print("FUNCTIONS READY")

# With the network structure defined, set the hyperparameters
training_epochs = 1000  # iterate over the full training set 1000 times
batch_size = 100        # 100 samples per iteration
display_step = 1
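As a sanity check on the feature-map sizes worked out in the docstring above, the following short sketch (not part of the original script; it assumes the graph definition above has already run under TensorFlow 1.x) builds a throwaway copy of the network and prints the static shape of each intermediate tensor:

# Sanity check of the feature-map sizes derived above. This only inspects static
# shapes; the extra ops it creates are never executed.
_layers = conv_basic(x, weights, biases, keep_prob)
for name in ['input_r', 'conv1', 'pool1', 'conv2', 'pool2', 'dense1', 'fc1', 'out']:
    print(name, _layers[name].get_shape().as_list())
# Expected shapes:
# input_r [None, 28, 28, 1]
# conv1   [None, 28, 28, 32]   # 'SAME' padding, stride 1 -> spatial size unchanged
# pool1   [None, 14, 14, 32]   # 2*2 max pooling halves h and w
# conv2   [None, 14, 14, 64]
# pool2   [None, 7, 7, 64]
# dense1  [None, 3136]         # 7*7*64 = 3136
# fc1     [None, 1024]
# out     [None, 10]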
# LAUNCH THE GRAPH
sess = tf.Session()  # create a Session
sess.run(init)       # run the initialization op inside the session
# OPTIMIZE
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(mnist.train.num_examples/batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)  # fetch the data one batch at a time
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})/total_batch
    if epoch % display_step == 0:
        train_accuracy = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        test_accuracy = sess.run(accr, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print("Epoch: %03d/%03d cost: %.9f TRAIN ACCURACY: %.3f TEST ACCURACY: %.3f"
              % (epoch, training_epochs, avg_cost, train_accuracy, test_accuracy))
print("DONE")
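Feeding all 10,000 test images through the convolutional network in a single sess.run call is what drives the multi-GiB activation tensors behind the allocator warnings shown below. A minimal sketch of evaluating the test set in chunks instead (the helper name batched_accuracy and the eval_batch_size value are my own illustrative choices, not part of the original script):

# Evaluate accuracy over a large set in chunks to keep activation tensors small.
def batched_accuracy(images, labels, eval_batch_size=500):
    n = images.shape[0]
    correct = 0.0
    for start in range(0, n, eval_batch_size):
        end = min(start + eval_batch_size, n)
        # accr is the mean accuracy over the fed chunk; weight it by the chunk size
        correct += sess.run(accr, feed_dict={x: images[start:end],
                                             y: labels[start:end],
                                             keep_prob: 1.0}) * (end - start)
    return correct / n

print("TEST ACCURACY (batched): %.3f" % batched_accuracy(mnist.test.images, mnist.test.labels))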
The graphics card I used is a GTX 960. The first time I ran this convolutional network I set the filter counts to 64 and 128, and it failed with a cryptic error that boiled down to running out of GPU memory, so I changed them to 32 and 64 to produce fewer feature maps. So I suppose this is its way of telling me to upgrade to a 1080?
I c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\gpu\gpu_device.cc:885] Found device 0 with properties:
name: GeForce GTX 960
major: 5 minor: 2 memoryClockRate (GHz) 1.304
pciBusID 0000:01:00.0
Total memory: 4.00GiB
Free memory: 3.33GiB
I c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\gpu\gpu_device.cc:906] DMA: 0
I c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\gpu\gpu_device.cc:916] 0: Y
I c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\gpu\gpu_device.cc:975] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GTX 960, pci bus id: 0000:01:00.0)
W c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\bfc_allocator.cc:217] Ran out of memory trying to allocate 2.59GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory is available.
W c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\bfc_allocator.cc:217] Ran out of memory trying to allocate 1.34GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory is available.
W c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\bfc_allocator.cc:217] Ran out of memory trying to allocate 2.10GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory is available.
W c:\tf_jenkins\home\workspace\release-win\device\gpu\os\windows\tensorflow\core\common_runtime\bfc_allocator.cc:217] Ran out of memory trying to allocate 3.90GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory is available.
Epoch: 000/1000 cost: 0.517761162 TRAIN ACCURACY: 0.970 TEST ACCURACY: 0.967
Epoch: 001/1000 cost: 0.093012387 TRAIN ACCURACY: 0.960 TEST ACCURACY: 0.979
... (remaining output omitted)
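If you hit the same allocator warnings, another option besides shrinking the filter counts is to stop TensorFlow from grabbing nearly all GPU memory up front and let it grow its allocation on demand. A minimal sketch, assuming TensorFlow 1.x (the 0.8 fraction in the commented alternative is just an example value):

# Create the session with on-demand GPU memory allocation instead of reserving it all at once.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Alternatively, cap the fraction of GPU memory TensorFlow may use:
# config.gpu_options.per_process_gpu_memory_fraction = 0.8
sess = tf.Session(config=config)
sess.run(init)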
That wraps up this detailed look at implementing a convolutional neural network in TensorFlow with this alternative programming style. For more material on TensorFlow convolutional neural networks, see the other related articles on 脚本之家!