【深度学习】 Keras 实现MNIST数据集上经典网络结构(DeepDense、LeNet、AlexNet、ZFNet)分类

本文通过使用Keras和TensorFlow框架,复现了深度全连接网络、LeNet、AlexNet和ZFNet等经典神经网络结构,对MNIST手写数字识别数据集进行训练和测试,展示了神经网络搭建和调参的全过程。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

实验简介

  本次实验一方面是熟悉Keras 序列式(Sequential)模型的使用,另一方面是复现早期的经典网络结构来学习神经网络搭建的技巧。数据集采用的是熟知的MNIST手写数字识别数据集,框架采用的是TensorFlow、Keras,数据集和框架的导入和安装请点击这里。经典的网络结构已有大量博客进行理论分析,这里只给出代码仅供参考;关于神经网络结构的发展,推荐大家看这篇文章。

DeepDense

  这个是自己定义的名字,也就是深度全连接网络。

# -*- coding: utf-8 -*-
"""
Deep fully-connected network ("DeepDense") trained on the MNIST digits.

Pipeline: load MNIST -> flatten to 784-d vectors scaled to roughly
[-1, 1] -> one-hot labels -> two 512-unit ReLU layers with dropout ->
softmax over the 10 digit classes.

Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.utils import np_utils

# ---- hyper-parameters ----
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 2  # kept small for a quick run; raise (e.g. to 5) for the final result
img_shape = (img_rows, img_cols, 1)

# ---- load the dataset ----
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# ---- preprocessing: flatten each 28x28 image and scale to ~[-1, 1] ----
X_train = X_train.reshape(len(X_train), -1).astype('float32')
X_test = X_test.reshape(len(X_test), -1).astype('float32')
X_train = (X_train - 127) / 127
X_test = (X_test - 127) / 127

# ---- one-hot encode the labels ----
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)

# ---- build the model: two hidden ReLU layers with dropout ----
model = Sequential()
for hidden_index in range(2):
    if hidden_index == 0:
        # first layer fixes the input dimensionality (28 * 28 = 784)
        model.add(Dense(512, input_shape=(784,), kernel_initializer='he_normal'))
    else:
        model.add(Dense(512, kernel_initializer='he_normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# ---- train, then evaluate on the held-out test split ----
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()

LeNet

# -*- coding: utf-8 -*-
"""
LeNet-style convolutional network trained on the MNIST digits.

Pipeline: load MNIST -> reshape to NHWC and scale pixels into [0, 1] ->
one-hot labels -> two conv+max-pool stages -> 500-unit dense layer ->
softmax over the 10 digit classes.

Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""

import numpy as np
import matplotlib.pyplot as plt

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.utils import np_utils

# ---- hyper-parameters ----
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 2  # kept small for a quick run; raise (e.g. to 5) for the final result
img_shape = (img_rows, img_cols, 1)

# ---- load the dataset ----
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# ---- preprocessing: channels-last layout, pixel values scaled to [0, 1] ----
X_train = X_train.reshape(-1, img_rows, img_cols, 1) / 255
X_test = X_test.reshape(-1, img_rows, img_cols, 1) / 255

# ---- one-hot encode the labels ----
y_train = np_utils.to_categorical(y_train, num_classes=num_classes)
y_test = np_utils.to_categorical(y_test, num_classes=num_classes)

# ---- build the model: conv -> pool -> conv -> pool -> dense -> softmax ----
model = Sequential([
    Conv2D(input_shape=img_shape, kernel_size=(5, 5), filters=20, activation='relu'),
    MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'),
    Conv2D(kernel_size=(5, 5), filters=50, activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'),
    Flatten(),
    Dense(500, activation='relu'),
    Dense(num_classes, activation='softmax'),
])
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# ---- train, then evaluate on the held-out test split ----
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()

AlexNet

# -*- coding: utf-8 -*-
"""
AlexNet-style convolutional network trained on the MNIST digits.

The filter counts follow the original AlexNet layout (96/256/384/384/256
convolutions, two 4096-unit dense layers) with strides reduced so the
network fits the small 28x28 single-channel inputs.

Fix over the original script: BatchNormalization used axis=1, which for
channels_last data (N, H, W, C) normalizes over image ROWS, not feature
channels. The feature axis for this layout is the last one, so axis=-1
is used throughout.

Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import tensorflow as tf

# ---- hyper-parameters ----
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 5
img_shape = (img_rows, img_cols, 1)

# ---- load the dataset ----
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# ---- preprocessing: channels-last layout, pixel values scaled to [0, 1] ----
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_train = X_train / 255
X_test = X_test / 255

# ---- one-hot encode the labels ----
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# ---- build the model ----
model = Sequential()

# stage 1: 11x11 conv (28x28 -> 18x18), 3x3/2 max-pool (-> 8x8)
model.add(Conv2D(input_shape=img_shape, kernel_size=(11, 11), filters=96, activation='relu', strides=[1, 1], padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=[2, 2]))
model.add(BatchNormalization(axis=-1))  # normalize over the channel axis (channels_last)

# stage 2: 5x5 conv, 3x3/2 max-pool (8x8 -> 3x3)
model.add(Conv2D(kernel_size=(5, 5), filters=256, activation='relu', strides=[1, 1], padding='same'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=[2, 2]))
model.add(BatchNormalization(axis=-1))

# stage 3: three 3x3 convolutions at constant spatial size
model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=[1, 1], padding='same'))
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=[1, 1], padding='same'))
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=256, activation='relu', strides=[1, 1], padding='same'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=[2, 2]))
model.add(BatchNormalization(axis=-1))

# classifier head: two 4096-unit dense layers with dropout, then softmax
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# ---- train, then evaluate on the held-out test split ----
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()

ZFNet

# -*- coding: utf-8 -*-
"""
ZFNet-style convolutional network trained on the MNIST digits.

ZFNet keeps the AlexNet layout but shrinks the first kernel from 11x11
to 7x7; strides are reduced here to fit the small 28x28 inputs.

Fixes over the original script:
- the preprocessing step reshaped the images but never scaled them, so
  raw uint8 values 0-255 were fed to the network (the other scripts in
  this series all divide by 255); the /255 scaling is restored;
- BatchNormalization used axis=1, which for channels_last data
  (N, H, W, C) normalizes over image ROWS, not feature channels; the
  channel axis for this layout is the last one, so axis=-1 is used.

Created on Thu Jun 13 11:19:33 2019

@author: YLC
"""
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import tensorflow as tf

# ---- hyper-parameters ----
img_rows, img_cols = 28, 28  # input dimensions
batch_size = 64
num_classes = 10
epochs = 5
img_shape = (img_rows, img_cols, 1)

# ---- load the dataset ----
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# ---- preprocessing: channels-last layout, pixel values scaled to [0, 1] ----
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_train = X_train / 255  # was missing: inputs must match the other scripts' scaling
X_test = X_test / 255

# ---- one-hot encode the labels ----
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# ---- build the model ----
model = Sequential()

# stage 1: 7x7 conv (28x28 -> 22x22), 3x3/2 max-pool (-> 10x10)
model.add(Conv2D(input_shape=img_shape, kernel_size=(7, 7), filters=96, activation='relu', strides=[1, 1], padding='valid'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=[2, 2]))
model.add(BatchNormalization(axis=-1))  # normalize over the channel axis (channels_last)

# stage 2: strided 5x5 conv (10x10 -> 5x5), 3x3/1 max-pool (-> 3x3)
model.add(Conv2D(kernel_size=(5, 5), filters=256, activation='relu', strides=[2, 2], padding='same'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=[1, 1]))
model.add(BatchNormalization(axis=-1))

# stage 3: three 3x3 convolutions at constant spatial size
model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=[1, 1], padding='same'))
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=384, activation='relu', strides=[1, 1], padding='same'))
model.add(BatchNormalization(axis=-1))

model.add(Conv2D(kernel_size=(3, 3), filters=256, activation='relu', strides=[1, 1], padding='same'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=[1, 1]))
model.add(BatchNormalization(axis=-1))

# classifier head: two 4096-unit dense layers with dropout, then softmax
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# ---- train, then evaluate on the held-out test split ----
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, validation_split=0.05)
loss, accuracy = model.evaluate(X_test, y_test)

print('Test loss:', loss)
print('Accuracy:', accuracy)
model.summary()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

lechuan_dafo

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值