I have a vgg19 trained with flow_from_dataframe and a Places365 TensorFlow-version network on 224x224x3 images for binary classification; the two networks are merged to create a new model.
When I have to evaluate this new model, I think I need an ImageDataGenerator and a flow_from_dataframe to access the .csv file (400 images in total), but I get the following error:
ValueError: Exception encountered when calling layer 'vgg19' (type Functional).
Input 0 of layer "block1_conv1" is incompatible with the layer: expected min_ndim=4, found ndim=2. Full shape received: (None, None)
vgg19 is receiving images in the same format as during training, so I really don't understand why this happens.
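As a sanity check, this is the kind of shape comparison I would run, a minimal sketch that reuses vgg19_model, valid_dataset and valid_images_location from the code further below:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# What the trained vgg19 branch expects as input, e.g. (None, 224, 224, 3).
print(vgg19_model.input_shape)

# One batch from a plain (un-zipped) flow_from_dataframe over the same .csv.
check_gen = ImageDataGenerator(rescale=1./255).flow_from_dataframe(
    dataframe=valid_dataset,
    directory=valid_images_location,
    x_col="id",
    y_col="T1",
    target_size=(224, 224),
    batch_size=32
)
x_batch, y_batch = next(check_gen)
print(x_batch.shape)  # a 4-D image batch, e.g. (32, 224, 224, 3)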
Code for the Places365 model:
from __future__ import division, print_function
import os
import pickle
import warnings
import numpy as np
from keras import backend as K
from keras.layers import Input
from keras.layers import Activation, Dense, Flatten
from keras.layers import MaxPooling2D
from keras.models import Model
from keras.layers import Conv2D
from keras.regularizers import l2
from keras.layers import Dropout
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.utils import get_source_inputs
from keras.utils import get_file
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
WEIGHTS_PATH = 'https://github.com/GKalliatakis/Keras-VGG16-places365/releases/download/v1.0/vgg16-places365_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/GKalliatakis/Keras-VGG16-places365/releases/download/v1.0/vgg16-places365_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16_Places365(weights='places',
                    input_shape=None,
                    pooling=None,
                    classes=365):
    img_input = Input(shape=input_shape)

    # Block 1
    x = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block1_conv1_365')(img_input)
    x = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block1_conv2_365')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="block1_pool_365", padding='valid')(x)

    # Block 2
    x = Conv2D(filters=128, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block2_conv1_365')(x)
    x = Conv2D(filters=128, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block2_conv2_365')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="block2_pool_365", padding='valid')(x)

    # Block 3
    x = Conv2D(filters=256, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block3_conv1_365')(x)
    x = Conv2D(filters=256, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block3_conv2_365')(x)
    x = Conv2D(filters=256, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block3_conv3_365')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="block3_pool_365", padding='valid')(x)

    # Block 4
    x = Conv2D(filters=512, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block4_conv1_365')(x)
    x = Conv2D(filters=512, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block4_conv2_365')(x)
    x = Conv2D(filters=512, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block4_conv3_365')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="block4_pool_365", padding='valid')(x)

    # Block 5
    x = Conv2D(filters=512, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block5_conv1_365')(x)
    x = Conv2D(filters=512, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block5_conv2_365')(x)
    x = Conv2D(filters=512, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='block5_conv3_365')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="block5_pool_365", padding='valid')(x)

    inputs = img_input

    # Create model.
    model = Model(inputs, x, name='vgg16-places365')

    # load weights
    weights_path = get_file('vgg16-places365_weights_tf_dim_ordering_tf_kernels_notop.h5',
                            WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models')
    model.load_weights(weights_path)

    return model
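The function above builds only the convolutional blocks and loads the no-top weights, so the Places365 branch ends at block5_pool_365 and outputs a 4-D feature map rather than class predictions. A quick check (a minimal sketch using only the function above):
# Inspect the output of the Places365 branch defined above.
places_branch = VGG16_Places365(weights='places', input_shape=(224, 224, 3))
print(places_branch.output_shape)  # (None, 7, 7, 512): 224 halved by five poolings, 512 filters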
Loading of the pre-trained models and the other methods:
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from keras.metrics import Precision, Recall
from tensorflow.keras import losses
model_vgg16_places365 = VGG16_Places365(weights='places', input_shape=(224, 224, 3))
def valid_generator(target_image_size, valid_dataset, valid_images_location):
    valid_data_gen = ImageDataGenerator(rescale=1./255)
    batch_size = 32
    valid_generator_1 = valid_data_gen.flow_from_dataframe(
        target_size=target_image_size,
        dataframe=valid_dataset,
        directory=valid_images_location,
        x_col="id",
        y_col="T1",
        batch_size=batch_size
    )
    valid_generator_2 = valid_data_gen.flow_from_dataframe(
        target_size=target_image_size,
        dataframe=valid_dataset,
        directory=valid_images_location,
        x_col="id",
        y_col="T1",
        batch_size=batch_size
    )
    custom_generator = zip(valid_generator_1, valid_generator_2)
    return custom_generator
vgg19_model_location = 'vgg19_trained.keras'
vgg19_model = load_model(vgg19_model_location)
combined_model = Model(inputs=[model_vgg16_places365.input, vgg19_model.input], outputs=vgg19_model.output)
combined_model.compile(loss=losses.BinaryCrossentropy(),
                       optimizer=Adam(learning_rate=0.0001),
                       metrics=['accuracy', Precision(), Recall()])
valid_dataset = pd.read_csv('valset.csv')
valid_images_location = 'val-images/'
target_image_size = (224,224)
evaluation = combined_model.evaluate(valid_generator(target_image_size, valid_dataset, valid_images_location))
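For completeness, this is a diagnostic sketch (reusing only the objects defined above, nothing new) that prints what combined_model declares as its inputs and what one element of the zipped generator actually looks like:
# Compare the model's declared inputs with one element of the zipped generator.
gen = valid_generator(target_image_size, valid_dataset, valid_images_location)

# combined_model was built with two image inputs, each expecting a 4-D batch.
for inp in combined_model.inputs:
    print(inp.name, inp.shape)  # e.g. (None, 224, 224, 3)

# zip() pairs up the two flow_from_dataframe iterators, so each element is a
# pair of (images, labels) tuples.
batch_1, batch_2 = next(gen)
print(type(batch_1), len(batch_1))         # <class 'tuple'> 2
print(batch_1[0].shape, batch_1[1].shape)  # images and labels from the first iterator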