ResNet50을 사용하여 분류 작업에 대한 전이 학습을 수행하려고 합니다. 제 분류기를 맨 위에 올려 놓았습니다. 예측할 클래스가 28개 있습니다. 데이터 생성기를 사용하여 model.fit()으로 모델에 데이터를 공급합니다.
this is my model:
# Input pipelines: rescale pixel values from [0, 255] to [0, 1].
# No augmentation is applied to either split.
train_datagen = ImageDataGenerator(
    rescale=1./255,
)
test_datagen = ImageDataGenerator(rescale=1./255)

train_dir = "/content/arabic/training"
train_generator = train_datagen.flow_from_directory(
    train_dir,
    color_mode="rgb",
    target_size=(32, 32),
    batch_size=20,
    class_mode='categorical')

validation_dir = "/content/arabic/validation"
# color_mode="rgb" made explicit to match the training generator.
# It is also the flow_from_directory default, so behavior is unchanged.
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    color_mode="rgb",
    target_size=(32, 32),
    batch_size=20,
    class_mode='categorical')
# ImageNet-pretrained ResNet50 backbone; 32x32 is the smallest input size
# ResNet50 accepts with include_top=False.
# NOTE(review): the printed summary lists these 23.5M params as non-trainable,
# but no `base_model.trainable = False` appears in the visible code — confirm
# where the backbone is frozen.
base_model = keras.applications.ResNet50(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(32, 32, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
def modelFunctionTransfer(l2 = 0.001, lr=0.001, drop = 0.2, modelName = None):
    """Build and compile a transfer-learning classifier with a 28-way softmax head.

    Args:
        l2: L2 regularization factor for the hidden Dense layer.
        lr: Learning rate for the Adam optimizer.
        drop: Dropout rate — NOTE(review): accepted but never used anywhere in
            this function; kept unused to preserve behavior. TODO: either add a
            Dropout layer or remove the parameter.
        modelName: Optional base model (e.g. a ResNet50 backbone) placed at the
            bottom of the Sequential stack.

    Returns:
        A compiled keras Sequential model (categorical crossentropy, Adam).
    """
    model = models.Sequential()
    # `is not None` (identity check) is the Python idiom, not `!= None`.
    if modelName is not None:
        model.add(modelName)
    model.add(layers.Flatten())
    model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(l2),
                           activation='relu'))
    model.add(layers.Dense(28, activation='softmax'))
    opt = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=['acc'])
    return model
# Build the classifier on top of the ResNet50 backbone.
# NOTE(review): drop=0.5 has no effect — modelFunctionTransfer never uses its
# `drop` argument (no Dropout layer is added).
model = modelFunctionTransfer(l2 = 0.001, lr = 0.001, drop = 0.5, modelName=base_model)
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 1, 1, 2048) 23587712
_________________________________________________________________
flatten_1 (Flatten) (None, 2048) 0
_________________________________________________________________
dense_2 (Dense) (None, 512) 1049088
_________________________________________________________________
dense_3 (Dense) (None, 28) 14364
=================================================================
Total params: 24,651,164
Trainable params: 1,063,452
Non-trainable params: 23,587,712
그리고 여기에 오류가 발생합니다.
# Omit steps_per_epoch / validation_steps and let model.fit derive them from
# the generators. With batch_size=20, hard-coding steps_per_epoch=672 demands
# 13,440 samples per epoch; if the directory holds fewer, the generator is
# exhausted mid-epoch and training aborts (here as
# "TypeError: 'NoneType' object is not callable").
history = model.fit(train_generator,
                    epochs=30,
                    validation_data=validation_generator,
                    )
참고로 내가 받고있는 전체 오류 메시지는 다음과 같습니다.
Epoch 1/30
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-46-1dfe32a08202> in <module>()
10 history = model.fit(train_generator,
11 steps_per_epoch = 100,
---> 12 epochs = 30,
13
14 )
2 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1098 _r=1):
1099 callbacks.on_train_batch_begin(step)
-> 1100 tmp_logs = self.train_function(iterator)
1101 if data_handler.should_sync:
1102 context.async_wait()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
853 # In this case we have created variables on the first call, so we run the
854 # defunned version which is guaranteed to never create variables.
--> 855 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
856 elif self._stateful_fn is not None:
857 # Release the lock early so that multiple threads can perform the call
TypeError: 'NoneType' object is not callable
사소한 변경으로 코드를 복사했습니다. 데이터 세트가 없어서 2 개의 클래스 만있는 데이터 세트를 사용했습니다. 내가 사용한 코드는 다음과 같으며 오류없이 실행되었습니다.
# Reproduction setup (2-class demo): identical pixel rescaling on both splits,
# no augmentation.
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_dir = r'c:\temp\people\train'
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(32, 32),
    batch_size=20,
    color_mode="rgb",
    class_mode='categorical',
)

validation_dir = r'c:\temp\people\test'
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(32, 32),
    batch_size=20,
    class_mode='categorical',
)
# ImageNet-pretrained ResNet50 backbone for the 2-class reproduction;
# 32x32 is the smallest input ResNet50 accepts with include_top=False.
base_model = keras.applications.ResNet50(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(32, 32, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
def modelFunctionTransfer(l2 = 0.001, lr=0.001, drop = 0.2, modelName = None):
    """Build and compile the transfer-learning classifier (2-class demo).

    Args:
        l2: L2 regularization factor for the hidden Dense layer.
        lr: Adam learning rate. Bug fix: the original hard-coded
            learning_rate=.001 and silently ignored this argument.
        drop: Dropout rate — NOTE(review): accepted but never used in the
            visible code; left unused to preserve behavior.
        modelName: Optional base model added as the first layer.

    Returns:
        A compiled Sequential model ending in a 2-way softmax.
    """
    model = Sequential()
    # `is not None` (identity check) is the Python idiom, not `!= None`.
    if modelName is not None:
        model.add(modelName)
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, kernel_regularizer=regularizers.l2(l2),
                                    activation='relu'))
    model.add(tf.keras.layers.Dense(2, activation='softmax'))
    # Use the lr parameter instead of a hard-coded constant.
    opt = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=['acc'])
    return model
# NOTE(review): drop=0.5 is accepted but unused by modelFunctionTransfer
# (no Dropout layer is added).
model = modelFunctionTransfer(l2 = 0.001, lr = 0.001, drop = 0.5, modelName=base_model)
model.summary()
# No steps_per_epoch/validation_steps: fit() derives them from the generators.
history = model.fit(train_generator, epochs = 30, validation_data = validation_generator)
model.fit에서는 일반적으로 steps_per_epoch 및 validation_steps를 생략하고 model.fit이 내부적으로 이러한 값을 계산하도록합니다.
이 기사는 인터넷에서 수집됩니다. 재 인쇄 할 때 출처를 알려주십시오.
침해가 발생한 경우 연락 주시기 바랍니다[email protected] 삭제
몇 마디 만하겠습니다