Applying tensorflow.keras metrics

For reference, an earlier blog post implements the same training loop without using the metrics API.

import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics

Cast the data to the right types and normalize

def preprocess(x, y):
    # scale pixel values to [0, 1] and cast labels to int32
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y
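
As a quick illustration (a standalone sketch, not part of the original post; demo_x and demo_y are made-up sample tensors), preprocess turns uint8 pixels in [0, 255] into float32 values in [0, 1] and casts labels to int32:

demo_x = tf.constant([[0, 128, 255]], dtype=tf.uint8)
demo_y = tf.constant([7], dtype=tf.uint8)
px, py = preprocess(demo_x, demo_y)
print(px.numpy())   # [[0.        0.5019608 1.       ]]
print(py.dtype)     # <dtype: 'int32'>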

Load the dataset and preprocess it

batchsz = 128
(x, y), (x_val, y_val) = datasets.mnist.load_data()
print('datasets:', x.shape, y.shape, x.min(), x.max())

db = tf.data.Dataset.from_tensor_slices((x,y))
db = db.map(preprocess).shuffle(60000).batch(batchsz).repeat(10)

ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
ds_val = ds_val.map(preprocess).batch(batchsz)

datasets: (60000, 28, 28) (60000,) 0 255
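
A quick way to confirm the pipeline (a minimal sketch, not part of the original post; sample_x and sample_y are just illustrative names) is to pull one batch from db and check its shape, dtype, and value range:

sample_x, sample_y = next(iter(db))
print(sample_x.shape, sample_x.dtype)    # (128, 28, 28) <dtype: 'float32'>
print(sample_y.shape, sample_y.dtype)    # (128,) <dtype: 'int32'>
print(float(tf.reduce_max(sample_x)))    # 1.0 after normalization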

Build the model

network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(128, activation='relu'),
                      layers.Dense(64, activation='relu'),
                      layers.Dense(32, activation='relu'),
                      layers.Dense(10)])
network.build(input_shape=(None, 28*28))
network.summary()  # print the model details

# optimizer
optimizer = optimizers.Adam(learning_rate=0.01)

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense (Dense)                multiple                  200960    
_________________________________________________________________
dense_1 (Dense)              multiple                  32896     
_________________________________________________________________
dense_2 (Dense)              multiple                  8256      
_________________________________________________________________
dense_3 (Dense)              multiple                  2080      
_________________________________________________________________
dense_4 (Dense)              multiple                  330       
=================================================================
Total params: 244,522
Trainable params: 244,522
Non-trainable params: 0
_________________________________________________________________
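
The parameter counts in the summary follow the usual Dense-layer rule of inputs * units weights plus units biases; a quick check of the figures above (a sketch added for clarity, not part of the original post):

# per-layer params = inputs * units + units (weights + biases)
sizes = [28*28, 256, 128, 64, 32, 10]
params = [i * o + o for i, o in zip(sizes[:-1], sizes[1:])]
print(params)        # [200960, 32896, 8256, 2080, 330]
print(sum(params))   # 244522, matching 'Total params' above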

Train the model and apply metrics

# use metrics to track accuracy and the running mean of the loss
acc_meter = metrics.Accuracy()
loss_meter = metrics.Mean()


for step, (x, y) in enumerate(db):

    with tf.GradientTape() as tape:
        # [b, 28, 28] => [b, 784]
        x = tf.reshape(x, (-1, 28*28))
        # [b, 784] => [b, 10]
        out = network(x)
        # [b] => [b, 10]
        y_onehot = tf.one_hot(y, depth=10)
        # cross-entropy loss; from_logits=True keeps the computation numerically
        # stable, so out must be the raw network output with no activation applied
        loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True))

        # accumulate the current loss value
        loss_meter.update_state(loss)

    # back-propagate and apply the gradient update
    grads = tape.gradient(loss, network.trainable_variables)
    optimizer.apply_gradients(zip(grads, network.trainable_variables))

    if step % 100 == 0:
        print(step, 'loss_meter:', loss_meter.result().numpy(), ' loss:', loss)
        # reset so the next accumulation window starts from zero
        loss_meter.reset_states()

    # evaluate
    if step % 500 == 0:
        total, total_correct = 0., 0
        # clear any previous state before reuse
        acc_meter.reset_states()

        for step1, (x, y) in enumerate(ds_val):
            # [b, 28, 28] => [b, 784]
            x = tf.reshape(x, (-1, 28*28))
            # [b, 784] => [b, 10]
            out = network(x)

            # [b, 10] => [b]
            pred = tf.argmax(out, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            # method 1: count correct predictions manually
            correct = tf.equal(pred, y)
            # bool tensor => int tensor => numpy
            total_correct += tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy()
            total += x.shape[0]

            # method 2: let the Accuracy metric do the counting
            acc_meter.update_state(y, pred)

        print(step, 'Evaluate Acc:', total_correct/total, acc_meter.result().numpy())
0 loss_meter: 0.0147538865   loss: tf.Tensor(0.0147538865, shape=(), dtype=float32)
78 Evaluate Acc: 0.9743 0.9743
100 loss_meter: 0.04805827   loss: tf.Tensor(0.041749448, shape=(), dtype=float32)
200 loss_meter: 0.053051163   loss: tf.Tensor(0.0131300185, shape=(), dtype=float32)
300 loss_meter: 0.07365899   loss: tf.Tensor(0.0278976, shape=(), dtype=float32)
400 loss_meter: 0.07007911   loss: tf.Tensor(0.05961611, shape=(), dtype=float32)
500 loss_meter: 0.059413455   loss: tf.Tensor(0.04578472, shape=(), dtype=float32)
78 Evaluate Acc: 0.976 0.976
600 loss_meter: 0.045514174   loss: tf.Tensor(0.1006662, shape=(), dtype=float32)
700 loss_meter: 0.05224053   loss: tf.Tensor(0.061094068, shape=(), dtype=float32)
800 loss_meter: 0.06696898   loss: tf.Tensor(0.08473669, shape=(), dtype=float32)
900 loss_meter: 0.06490257   loss: tf.Tensor(0.05812662, shape=(), dtype=float32)
1000 loss_meter: 0.056545332   loss: tf.Tensor(0.10347018, shape=(), dtype=float32)
78 Evaluate Acc: 0.9745 0.9745
1100 loss_meter: 0.0515905   loss: tf.Tensor(0.045466803, shape=(), dtype=float32)
1200 loss_meter: 0.06225908   loss: tf.Tensor(0.046285823, shape=(), dtype=float32)
1300 loss_meter: 0.057779107   loss: tf.Tensor(0.05366286, shape=(), dtype=float32)
1400 loss_meter: 0.06661249   loss: tf.Tensor(0.10940219, shape=(), dtype=float32)
1500 loss_meter: 0.059498344   loss: tf.Tensor(0.05784896, shape=(), dtype=float32)
78 Evaluate Acc: 0.974 0.974
1600 loss_meter: 0.06252271   loss: tf.Tensor(0.0287047, shape=(), dtype=float32)
1700 loss_meter: 0.060016934   loss: tf.Tensor(0.006867729, shape=(), dtype=float32)
1800 loss_meter: 0.05751593   loss: tf.Tensor(0.13693535, shape=(), dtype=float32)
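
To recap the pattern used above, every tf.keras metric follows the same three-step lifecycle: update_state() accumulates values, result() reports the current aggregate, and reset_states() clears the accumulator for the next window. A minimal standalone sketch (not from the original post):

m = metrics.Mean()
m.update_state(2.0)
m.update_state(4.0)
print(m.result().numpy())    # 3.0, the mean of everything accumulated so far
m.reset_states()             # clear state before the next accumulation window

a = metrics.Accuracy()
a.update_state([0, 1, 2], [0, 1, 1])   # compares labels and predictions element-wise
print(a.result().numpy())    # ~0.6667, i.e. 2 of 3 correct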