A newcomer's problem while writing a CNN, very grateful to anyone who can answer
Posted 1 month ago · Author: Ztesu · From: Q&A

An error came up while I was writing a CNN:

ValueError: A target array with shape (1738, 3) was passed for an output of shape (None, 4) while using as loss `categorical_crossentropy`. This loss expects targets to have the same shape as the output.

Code:

import matplotlib.pyplot as plt

import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import (
    Activation, BatchNormalization, Convolution2D, Dense, Flatten,
    MaxPooling2D)
from tensorflow.keras.models import Sequential
from tensorflow.keras.regularizers import l2
from matplotlib.pyplot import imshow
from sklearn.preprocessing import StandardScaler
from tensorflow import keras

import mypreprocess
 

path = "D:\\mat\\guzhangzhenduan\\0HP\\tupian\\fenglei"
num_classes=4  # 4-class classification
batch_size=20
epochs=10

x_train, y_train, x_valid, y_valid, x_test, y_test= mypreprocess.prepro(d_path=path)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
print(x_test.shape, y_test.shape)
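# Note: the error reports a target array of shape (1738, 3); if y_train prints with
# that shape here, the labels are already one-hot encoded with only 3 classes, which
# conflicts with num_classes = 4 above and the (None, 4) output of the final Dense layer.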

# display some of the images
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    image_arr = x_train[i].reshape((3, 256, 256))
    # print(image_arr.shape)  # -> (3, 256, 256)
    image_arr = image_arr.transpose((2, 1, 0))  # move channels to the last axis -> 256*256*3 ((1, 2, 0) would keep row/column order)
    # print(image_arr.shape)  # -> (256, 256, 3)
    plt.imshow(image_arr)
plt.show()


# standardize the data
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 256, 256, 3)
x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 256, 256, 3)
x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 256, 256, 3)
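# Note: StandardScaler here is fit on all pixel values flattened into a single column,
# so one global mean/std is applied to every channel before reshaping back to (N, 256, 256, 3).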

input_shape=x_train_scaled.shape[1:]
print('input sample shape', input_shape)
print('training set shape', x_train_scaled.shape)
print('number of training samples', x_train_scaled.shape[0])
print('validation set shape', x_valid_scaled.shape)
print('number of validation samples', x_valid_scaled.shape[0])
print('test set shape', x_test_scaled.shape)
print('number of test samples', x_test_scaled.shape[0])
model_name = "xiaobo_cnn"
 
model=tf.keras.models.Sequential()
# input 256*256*3, output 64*64*16; filters=16 -> 16 output channels (16 kernels), kernel_size=3 -> 3x3 kernels
model.add(Convolution2D(filters=16,kernel_size=3,padding='same',strides=1,input_shape=input_shape,kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=4))

# input 64*64*16, output 32*32*32
model.add(Convolution2D(filters=32,kernel_size=3,padding='same',strides=1,kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))

# input 32*32*32, output 16*16*64
model.add(Convolution2D(filters=64,kernel_size=3,padding='same',strides=1,kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))

# input 16*16*64, output 8*8*64
model.add(Convolution2D(filters=64,kernel_size=3,padding='same',strides=1,kernel_regularizer=l2(1e-4)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=2))

# 8*8*64 -> flatten, then fully connected layers
model.add(Flatten())

model.add(Dense(units=100,activation='relu',kernel_regularizer=l2(1e-4)))

model.add(Dense(units=num_classes,activation='softmax',kernel_regularizer=l2(1e-4)))
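# This Dense(num_classes) layer produces the (None, 4) output shape named in the error;
# categorical_crossentropy therefore expects one-hot targets with 4 columns.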

# compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])

# network structure
model.summary()

# TensorBoard callback for viewing training logs
tb_cb = TensorBoard(log_dir='logs/{}'.format(model_name))
# start training the model
model.fit(x=x_train_scaled,y=y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(x_valid_scaled,y_valid),shuffle=True,callbacks=[tb_cb])
# evaluate the model
score=model.evaluate(x=x_test_scaled,y=y_test,verbose=0)
print(score)
print("测试集上的损失:", score[0])
print("测试集上的准确率:", score[1])
