I tried to replicate the code from this page, but implemented the model as a class and made a few modifications.

%matplotlib inline
from itertools import islice
import itertools
from itertools import count 

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import trange
from tensorflow.keras import Model
from keras.utils import to_categorical


(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

x_train, x_test = x_train[..., np.newaxis]/255.0, x_test[..., np.newaxis]/255.0

x_train = x_train.reshape((60000, 28, 28, 1))
x_train= x_train.astype('float32') / 255 # rescale pixel values from range [0, 255] to [0, 1]

x_test = x_test.reshape((10000, 28, 28, 1))
x_test = x_test.astype('float32') / 255

train_labels = to_categorical(y_train)
test_labels = to_categorical(y_test)

print("Number of original training examples:", len(x_train))
print("Number of original test examples:", len(x_test))

data_mean = 0.1307
data_std = 0.3081

# Normalize the data
x_train = (x_train/255.0 - data_mean) / data_std
x_test = (x_test/255.0 - data_mean) / data_std
num_classes = 10
input_shape = (28, 28, 1)

# Decay the learning rate at a base rate of gamma roughly every epoch, which
# is len(x_train) steps
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
    1,
    decay_steps=len(x_train),
    decay_rate=0.7)

# Define the optimizer to use for gradient descent
optimizer = tf.keras.optimizers.Adadelta(scheduler)
loss_object = tf.keras.losses.CategoricalCrossentropy()

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.CategoricalCrossentropy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalCrossentropy(name='test_accuracy')

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)

@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)


class MyModel(tf.keras.models.Sequential):
    def __init__(self):
        super(MyModel, self).__init__()
        self.c1 = tf.keras.layers.Conv2D(32, (3,3), strides=(1,1),
                                      padding='valid', 
                                      activation='relu',
                                      input_shape=input_shape),
        self.max2d = tf.keras.layers.MaxPool2D(),
        self.c2 = tf.keras.layers.Conv2D(64, (3,3), strides=(1,1),
                                        padding='valid',
                                        activation='relu'),
        self.dropout1 = tf.keras.layers.Dropout(0.25),
        self.flatten = tf.keras.layers.Flatten(),
        self.dense1 = tf.keras.layers.Dense(64, activation='relu'),
        self.dropout2 = tf.keras.layers.Dropout(0.5),
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.c2 (x)
        x = self.max2d(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dropout2(x)
        x = self.dense2(x)
        return x

model = MyModel()

train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)

EPOCHS = 5

for epoch in range(EPOCHS):
    for images, labels in train_ds:        
        train_step(images, labels)  # <----- Boom!!

    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)

The full error I get in Visual Studio Code is:

TypeError                                 Traceback (most recent call last)
d:\Machine_Learning\tensorflow_test.ipynb Cell 5' in <module>
      6 for epoch in range(EPOCHS):
      7     for images, labels in train_ds:        
----> 8         train_step(images, labels)
     10     for test_images, test_labels in test_ds:
     11         test_step(test_images, test_labels)

File c:\Python38\lib\site-packages\tensorflow\python\util\traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    151 except Exception as e:
    152   filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153   raise e.with_traceback(filtered_tb) from None
    154 finally:
    155   del filtered_tb

File ~\AppData\Local\Temp\__autograph_generated_file14pr1250.py:9, in outer_factory.<locals>.inner_factory.<locals>.tf__train_step(images, labels)
      7 with ag__.FunctionScope('train_step', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:
      8     with ag__.ld(tf).GradientTape() as tape:
----> 9         predictions = ag__.converted_call(ag__.ld(model), (ag__.ld(images),), None, fscope)
     10         loss = ag__.converted_call(ag__.ld(loss_object), (ag__.ld(labels), ag__.ld(predictions)), None, fscope)
     11     gradients = ag__.converted_call(ag__.ld(tape).gradient, (ag__.ld(loss), ag__.ld(model).trainable_variables), None, fscope)

File c:\Python38\lib\site-packages\keras\utils\traceback_utils.py:69, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     66   filtered_tb = _process_traceback_frames(e.__traceback__)
     67   # To get the full stack trace, call:
     68   # `tf.debugging.disable_traceback_filtering()`
---> 69   raise e.with_traceback(filtered_tb) from None
     70 finally:
     71   del filtered_tb

File ~\AppData\Local\Temp\__autograph_generated_filecwsasoc4.py:10, in outer_factory.<locals>.inner_factory.<locals>.tf__call(self, x)
      8 do_return = False
      9 retval_ = ag__.UndefinedReturnValue()
---> 10 x = ag__.converted_call(ag__.ld(self).c1, (ag__.ld(x),), None, fscope)
     11 x = ag__.converted_call(ag__.ld(self).c2, (ag__.ld(x),), None, fscope)
     12 x = ag__.converted_call(ag__.ld(self).max2d, (ag__.ld(x),), None, fscope)

TypeError: in user code:

    File "C:\Users\user\AppData\Local\Temp\ipykernel_17788\3342806354.py", line 21, in train_step  *
        predictions = model(images)
    File "c:\Python38\lib\site-packages\keras\utils\traceback_utils.py", line 69, in error_handler  **
        raise e.with_traceback(filtered_tb) from None
    File "C:\Users\user\AppData\Local\Temp\__autograph_generated_filecwsasoc4.py", line 10, in tf__call
        x = ag__.converted_call(ag__.ld(self).c1, (ag__.ld(x),), None, fscope)

    TypeError: Exception encountered when calling layer "my_model_7" (type MyModel).
    
    in user code:
    
        File "C:\Users\user\AppData\Local\Temp\ipykernel_17788\2012441124.py", line 56, in call  *
            x = self.c1(x)
    
        TypeError: '_TupleWrapper' object is not callable
    
    
    Call arguments received by layer "my_model_7" (type MyModel):
      • x=tf.Tensor(shape=(32, 28, 28, 1), dtype=float32)

So I understand that I am passing a tuple to the c1 layer where another data type is expected. How can I fix this error?

Python version: 3.8.3 x64

TensorFlow version: 2.10.0-dev20220517

Keras version: 2.10.0

OS: Windows 10

Edit:

A silly trailing comma cost me two days of debugging. The correct way to declare the model and fix the error is:

class MyModel(tf.keras.models.Sequential):
    def __init__(self):
        super(MyModel, self).__init__()
        self.c1 = tf.keras.layers.Conv2D(32, (3,3), strides=(1,1),
                                      padding='valid', 
                                      activation='relu',
                                      input_shape=input_shape)
        self.max2d = tf.keras.layers.MaxPool2D()
        self.c2 = tf.keras.layers.Conv2D(64, (3,3), strides=(1,1),
                                        padding='valid',
                                        activation='relu')
        self.dropout1 = tf.keras.layers.Dropout(0.25)
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(64, activation='relu')
        self.dropout2 = tf.keras.layers.Dropout(0.5)
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.c2 (x)
        x = self.max2d(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dropout2(x)
        x = self.dense2(x)
        return x

Many thanks, Pietro D'Antuono!

Recommended Answer

Looking at the error message (TypeError: '_TupleWrapper' object is not callable), my guess is that the trailing commas cause the class attributes to be interpreted as tuples:

class MyModel(tf.keras.models.Sequential):
    def __init__(self):
        super(MyModel, self).__init__()
        self.c1 = tf.keras.layers.Conv2D(32, (3,3), strides=(1,1),
                                      padding='valid', 
                                      activation='relu',
                                      input_shape=input_shape)  # REMOVE TR. COMMAS,
        self.max2d = tf.keras.layers.MaxPool2D()  # ,
        self.c2 = tf.keras.layers.Conv2D(64, (3,3), strides=(1,1),
                                        padding='valid',
                                        activation='relu')  # ,
        self.dropout1 = tf.keras.layers.Dropout(0.25)  # ,
        self.flatten = tf.keras.layers.Flatten()  # ,
        self.dense1 = tf.keras.layers.Dense(64, activation='relu')  # ,
        self.dropout2 = tf.keras.layers.Dropout(0.5)  # ,
        self.dense2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.c2 (x)
        x = self.max2d(x)
        x = self.dropout1(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dropout2(x)
        x = self.dense2(x)
        return x

model = MyModel()
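
As a minimal sketch of why the trailing comma breaks the model (the Broken and Fixed classes below are purely illustrative, not part of the original post): an assignment whose right-hand side ends with a comma creates a one-element tuple, and Keras attribute tracking then stores that tuple in a wrapper object rather than a callable layer, which is where the _TupleWrapper in the traceback comes from.

import tensorflow as tf

class Broken(tf.keras.Model):
    def __init__(self):
        super().__init__()
        # Trailing comma: the attribute becomes a 1-element tuple, not a Dense layer
        self.dense = tf.keras.layers.Dense(10),

class Fixed(tf.keras.Model):
    def __init__(self):
        super().__init__()
        # No trailing comma: the attribute is the layer itself and is callable
        self.dense = tf.keras.layers.Dense(10)

print(type(Broken().dense))  # a tuple wrapper type (exact class name varies by TF version), not callable
print(type(Fixed().dense))   # the Dense layer class, callable

Printing the attribute types of the original MyModel in the same way should show that c1 through dropout2 are all tuple wrappers, which is why self.c1(x) raises the TypeError seen in the traceback.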
