我有以下代码,可以很好地工作.但问题是,它只对一批有效,因为我使用的是next(iter)
我创建了一个TensorFlow数据集,它必须为我的问题返回3个值(即X: [x,y,z]
).但我只需要将x
值传递给模型.我需要将所有3个值打包在一起,因为我稍后将使用y
和z
现在,问题是,当我要调用fit
时,我必须以某种方式将这3个值分开,以便正确地调用网络架构.所以,我的问题是在这种情况下如何使用PrefetchDataset
来调用fit
.
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Input, Dense, Activation, \
Conv2DTranspose, Conv2D, Reshape
from tensorflow.keras.models import Model
# Let tf.data pick prefetch buffer sizes dynamically at runtime.
AUTOTUNE = tf.data.experimental.AUTOTUNE
def scale(X, a=-1, b=1, dtype='float32'):
    """Min-max scale ``X`` into the range ``[a, b]``.

    Args:
        X: input tensor (any shape).
        a, b: target range bounds; swapped automatically if ``a > b``.
        dtype: dtype the computed min/max are cast to.

    Returns:
        ``(scaled, xmin, xmax)``: the scaled tensor plus the original min and
        max, returned so the transform can be inverted later (see ``rescale``).
    """
    if a > b:
        a, b = b, a
    xmin = tf.cast(tf.math.reduce_min(X), dtype=dtype)
    xmax = tf.cast(tf.math.reduce_max(X), dtype=dtype)
    # Guard the degenerate constant-input case (xmax == xmin): the original
    # expression divided by zero and produced NaNs; divide_no_nan yields 0.
    X = tf.math.divide_no_nan(X - xmin, xmax - xmin)
    scaled = X * (b - a) + a
    return scaled, xmin, xmax
def set_shape_b(x, y, z):
    """Re-attach static shapes lost by tf.py_function: x -> (16, 16, 2), y/z -> (1,)."""
    return (tf.reshape(x, [16, 16, 2]),
            tf.reshape(y, [1]),
            tf.reshape(z, [1]))
def set_shape_a(x, y, z):
    """Re-attach static shapes lost by tf.py_function: x -> (4, 4, 2), y/z -> (1,)."""
    return (tf.reshape(x, [4, 4, 2]),
            tf.reshape(y, [1]),
            tf.reshape(z, [1]))
def First(lr):
    """Build the upsampling sub-model.

    Dense -> Reshape(4, 4, 16) -> two stride-2 transposed convolutions ->
    tanh, taking an input of shape ``lr`` and quadrupling its spatial size.
    """
    inp = Input(lr)
    hidden = Dense(16)(inp)
    hidden = Reshape((4, 4, 16))(hidden)
    for _ in range(2):
        hidden = Conv2DTranspose(2, kernel_size=3, strides=2,
                                 padding='same')(hidden)
    return Model(inp, Activation('tanh')(hidden), name='First')
def Second(hr):
    """Build the downsampling sub-model.

    Dense -> two stride-2 convolutions -> per-position sigmoid score, taking
    an input of shape ``hr`` and shrinking its spatial size by 4x.
    """
    inp = Input(hr)
    features = Dense(16)(inp)
    features = Conv2D(2, kernel_size=3, strides=2, padding='same')(features)
    features = Conv2D(2, kernel_size=3, strides=2, padding='same')(features)
    score = Dense(1, activation='sigmoid')(features)
    return Model(inputs=inp, outputs=score, name='Second')
def build_model(First, Second):
    """Chain the two sub-models and expose both intermediate and final outputs.

    NOTE(review): the parameter names shadow the module-level ``First`` and
    ``Second`` factory functions — kept as-is to preserve the call interface.
    Relies on the module-level constant ``INP`` for the input shape.
    """
    source = Input(shape=INP)
    generated = First(source)
    critic_out = Second(generated)
    return Model(inputs=source, outputs=[generated, critic_out], name='model')
# Preprocess --------------- #
a = np.random.random((20, 4, 4, 2)).astype('float32')
b = np.random.random((20, 16, 16, 2)).astype('float32')

dataset_a = tf.data.Dataset.from_tensor_slices(a)
dataset_b = tf.data.Dataset.from_tensor_slices(b)

# scale() runs through tf.py_function, which drops static shape information,
# so the set_shape_* maps re-attach the shapes immediately afterwards.
three_floats = (tf.float32, tf.float32, tf.float32)
dataset_b = (dataset_b
             .map(lambda x: tf.py_function(scale, [x], three_floats))
             .map(set_shape_b))
dataset_a = (dataset_a
             .map(lambda x: tf.py_function(scale, [x], three_floats))
             .map(set_shape_a))

# Constant all-ones labels for the second model head.
dataset_ones = tf.data.Dataset.from_tensor_slices(tf.ones((len(b), 4, 4, 1)))

# Element structure: ((x_a, min_a, max_a), ((x_b, min_b, max_b), ones)).
dataset = (tf.data.Dataset.zip((dataset_a, (dataset_b, dataset_ones)))
           .cache()
           .batch(2)
           .prefetch(buffer_size=AUTOTUNE))
# Prepare models -------------------- #
# Input/output spatial shapes for the two sub-models (channels-last).
INP = (4, 4, 2)
OUT = (16, 16, 2)
first = First(INP)
second = Second(OUT)
# Combined model emits [generated, score]; each output gets its own loss below.
model = build_model(first, second)
model.compile(loss=['mse', 'binary_crossentropy'],
optimizer= tf.keras.optimizers.Adam(learning_rate=1e-4))
# Train on the full dataset instead of a single batch pulled out with
# next(iter(dataset)) — that was the defect: only one batch was ever used.
# Each element is ((x_a, min_a, max_a), ((x_b, min_b, max_b), ones)); the
# model consumes only x_a as input and (x_b, ones) as the two targets, so
# map the element structure down to what fit() expects.
train_dataset = dataset.map(
    lambda lr, hr_and_ones: (lr[0], (hr_and_ones[0][0], hr_and_ones[1])))
# train ------------------
model.fit(train_dataset, epochs=2)
UPDATE
def rescale(X_scaled, xmin, xmax):
    """Invert the [-1, 1] scaling applied by ``scale``, mapping back to [xmin, xmax]."""
    span = xmax - xmin
    X = span * (X_scaled + 1) / 2.0 + xmin
    return X
class PlotCallback(tf.keras.callbacks.Callback):
    """Plot the rescaled prediction for a fixed image at the end of each epoch.

    Args:
        image: input batch fed to ``model.predict``.
        xmin, xmax: original data range, used to undo the [-1, 1] scaling.
        model: the compiled model (Keras also injects it via ``set_model``
            when the callback is registered with ``fit``; the explicit
            reference keeps the callback usable stand-alone as well).
    """

    def __init__(self, image, xmin, xmax, model):
        self.image = image
        self.xmin = xmin
        self.xmax = xmax
        self.model = model

    def on_epoch_end(self, epoch, logs=None):  # was logs={}: mutable default
        # Fix: plt was referenced but never imported anywhere in this file.
        import matplotlib.pyplot as plt
        preds = self.model.predict(self.image)
        # First model output is the generated image; undo the scaling.
        y_pred = rescale(preds[0], self.xmin, self.xmax)
        fig, ax = plt.subplots(figsize=(14, 10))
        # Show channel 0 of the first sample in the batch.
        ax.imshow(y_pred[0][:, :, 0])
        plt.close(fig)  # close this figure specifically, not just the current one
我正在使用上面的函数,当我尝试训练的时候,我想要这样的东西:
# Desired usage (pseudo-code): the three placeholder arguments below are not
# defined anywhere in this file — they stand for the scaled x values and the
# per-sample xmin/xmax produced by scale() inside the dataset pipeline.
# NOTE(review): the third placeholder says xmax_from_dataset_b while the other
# two reference dataset_a — presumably a typo; confirm which dataset is meant.
model.fit(
dataset,
validation_data=dataset,
epochs=2,
callbacks=[PlotCallback(here_the_dataset_a_scaled_values,
xmin_from_dataset_a,
xmax_from_dataset_b, model)]
)