tf.data input pipeline
import tensorflow as tf

# Download the data
(train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data()

# Normalize the data
train_image = train_image / 255
test_image = test_image / 255

# Create the datasets
ds_train_img = tf.data.Dataset.from_tensor_slices(train_image)
ds_train_lab = tf.data.Dataset.from_tensor_slices(train_label)

# Zip images and labels together
ds_train = tf.data.Dataset.zip((ds_train_img, ds_train_lab))

# Shuffle, repeat, and set how many samples each batch yields
ds_train = ds_train.shuffle(1000).repeat().batch(32)

# Alternative: build the dataset from a tuple directly
ds_test = tf.data.Dataset.from_tensor_slices((test_image, test_label))

# The test set needs no shuffling, and no explicit repeat is needed here
ds_test = ds_test.batch(32)

# Build the model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Configure the optimizer and loss function
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])

# Steps per epoch: total sample count floor-divided by the batch size
steps_per_epoch = train_image.shape[0] // 32
validation_steps = test_image.shape[0] // 32

# Train and validate the model
model.fit(ds_train,
          epochs=5,
          steps_per_epoch=steps_per_epoch,
          validation_data=ds_test,
          validation_steps=validation_steps)
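To sanity-check the pipeline, it helps to pull a single batch out of the dataset before training. A minimal sketch, reusing the ds_train variable from the code above:

import tensorflow as tf

# Take one batch from the shuffled, batched dataset and inspect its shapes
for imgs, labels in ds_train.take(1):
    print(imgs.shape)    # (32, 28, 28)
    print(labels.shape)  # (32,)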
Convolutional neural networks
CNN architecture
The typical building blocks, stacked in order (see the code sketch after this list):
Convolution layer: Conv2D
Nonlinearity layer: relu / sigmoid / tanh
Pooling layer: Pooling2D
Fully connected layer: w*x + b
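A minimal sketch of these four building blocks assembled into a Keras Sequential model; the filter count, kernel size, and output width are illustrative assumptions, not fixed values:

import tensorflow as tf

model = tf.keras.Sequential([
    # Convolution layer with the nonlinearity (relu) applied as the activation
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    # Pooling layer halves the spatial dimensions
    tf.keras.layers.MaxPooling2D(),
    # Flatten the feature maps before the fully connected layer
    tf.keras.layers.Flatten(),
    # Fully connected layer: w*x + b, here with a softmax for classification
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()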
Convolution layer parameters (see the Conv2D sketch after this list):
ksize: the size of the convolution kernel
strides: how far the kernel moves in each step
padding: how the image edges are padded
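In the Keras layer API these map onto the kernel_size, strides, and padding arguments of Conv2D (ksize is the tf.nn-style name for kernel_size). A minimal sketch; the values are illustrative:

import tensorflow as tf

conv = tf.keras.layers.Conv2D(
    filters=32,           # number of convolution kernels
    kernel_size=(3, 3),   # ksize: the size of each kernel
    strides=(1, 1),       # how far the kernel moves per step
    padding='same',       # 'same' pads the edges so output size matches input; 'valid' adds no padding
    activation='relu',
)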
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

(train_image, train_label), (test_image, test_label) = tf.keras.datasets.fashion_mnist.load_data()

# Add a channel dimension:
# a conv net expects input of shape (count, height, width, depth)
train_image = np.expand_dims(train_image, -1)
test_image = np.expand_dims(test_image, -1)

# Build the model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), input_shape=train_image.shape[1:], activation='relu'))
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Configure the optimizer and loss function
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])

# Train the model
history = model.fit(train_image, train_label,
                    epochs=30,
                    validation_data=(test_image, test_label))

# history.history.keys() lists the recorded metrics
# Plot the training curves
plt.plot(history.epoch, history.history['acc'], label='acc')
plt.plot(history.epoch, history.history['val_acc'], label='val_acc')
plt.plot(history.epoch, history.history['loss'], label='loss')
plt.plot(history.epoch, history.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
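After training, the model can be scored on held-out data and used for prediction. A minimal sketch, reusing the model and arrays above:

import numpy as np

# evaluate returns the loss plus the compiled metrics, here [loss, acc]
test_loss, test_acc = model.evaluate(test_image, test_label)

# predict returns per-class probabilities; argmax picks the predicted label
probs = model.predict(test_image[:5])
print(np.argmax(probs, axis=1))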
Combined example: tf.data with a convolutional network
import tensorflow as tf
import matplotlib.pyplot as plt
import pathlib
import random

# Collect all file paths
data_dir = '/home/tfb/Downloads/qq-files/1597669291/file_recv/2_class'
data_root = pathlib.Path(data_dir)
# for item in data_root.iterdir():
#     print(item)
all_image_path = list(data_root.glob('*/*'))
all_image_path = [str(path) for path in all_image_path]
random.shuffle(all_image_path)
image_count = len(all_image_path)

# Encode the labels
label_names = sorted(item.name for item in data_root.glob('*/'))
label_to_index = dict((name, index) for index, name in enumerate(label_names))

# Attach a label to each image
# pathlib.Path('..../2.jpg').parent.name returns the parent directory name
all_image_label = [label_to_index[pathlib.Path(p).parent.name] for p in all_image_path]
index_to_label = dict((v, k) for k, v in label_to_index.items())

# Check that paths and labels line up
# import IPython.display as display
# for n in range(3):
#     image_index = random.choice(range(len(all_image_path)))
#     display.display(display.Image(all_image_path[image_index]))
#     print(index_to_label[all_image_label[image_index]])

# Inspect an image and its shape
# img_raw = tf.io.read_file(all_image_path[0])   # read the file
# img_tensor = tf.image.decode_image(img_raw)    # decode it
# print(img_tensor.shape)

# Image preprocessing function
def load_preprocess_image(image_path):
    img_raw = tf.io.read_file(image_path)
    img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
    img_tensor = tf.image.resize(img_tensor, [256, 256])
    img_tensor = tf.cast(img_tensor, tf.float32)
    img = img_tensor / 255
    return img

# Verify the preprocessing
# plt.imshow(load_preprocess_image(all_image_path[100]))
# plt.show()

# Build the dataset
path_ds = tf.data.Dataset.from_tensor_slices(all_image_path)
image_dataset = path_ds.map(load_preprocess_image)
label_dataset = tf.data.Dataset.from_tensor_slices(all_image_label)
dataset = tf.data.Dataset.zip((image_dataset, label_dataset))

# Split into training and test sets
test_count = int(image_count * 0.2)
train_count = image_count - test_count
train_dataset = dataset.skip(test_count)
test_dataset = dataset.take(test_count)

batch_size = 32
train_dataset = train_dataset.repeat().shuffle(buffer_size=train_count).batch(batch_size)
test_dataset = test_dataset.batch(batch_size)

# Build the model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D())
model.add(tf.keras.layers.Conv2D(1024, (3, 3), activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(1024, activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

# Compile and train the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])
steps_per_epoch = train_count // batch_size
validation_steps = test_count // batch_size

# Record the training history
history = model.fit(train_dataset,
                    epochs=10,
                    steps_per_epoch=steps_per_epoch,
                    validation_data=test_dataset,
                    validation_steps=validation_steps)
# history.history.keys() lists the recorded metrics

# Plot the training curves
plt.plot(history.epoch, history.history['acc'], label='acc')
plt.plot(history.epoch, history.history['val_acc'], label='val_acc')
plt.plot(history.epoch, history.history['loss'], label='loss')
plt.plot(history.epoch, history.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
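Decoding and resizing JPEGs on the fly can leave the GPU waiting on input. A common refinement, not part of the original code above, is to parallelize the map call and prefetch batches; a minimal sketch against the variables defined above:

AUTOTUNE = tf.data.experimental.AUTOTUNE  # tf.data.AUTOTUNE in newer TF releases

# Decode and resize images on several threads instead of one
image_dataset = path_ds.map(load_preprocess_image, num_parallel_calls=AUTOTUNE)

# Overlap preparation of the next batch with training on the current one
train_dataset = train_dataset.prefetch(AUTOTUNE)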