from __future__ import absolute_import, division, print_function, unicode_literals

import os
import tensorflow as tf
from tensorflow import keras

checkpoint_path = "."

# encoder, decoder and optimizer are the objects built earlier in the training script
ckpt = tf.train.Checkpoint(encoder=encoder,
                           decoder=decoder,
                           optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)

# restore the most recent checkpoint; latest_checkpoint is None if nothing has been saved yet
status = ckpt.restore(ckpt_manager.latest_checkpoint)

Using tf.train.CheckpointManager seems to be the simpler approach.
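
For reference, a minimal sketch of the save side under the same assumptions (ckpt_manager built as above; the training step and the epochs variable are placeholders, not part of the original code):

for epoch in range(epochs):
    # ... run one epoch of training on encoder / decoder here ...
    if (epoch + 1) % 5 == 0:
        # writes a new checkpoint and keeps at most the last 5 (max_to_keep=5)
        save_path = ckpt_manager.save()
        print("Saved checkpoint for epoch {} at {}".format(epoch + 1, save_path))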

The other approach, following the network from 輕鬆學會Google Tensorflow2.0, saves the weights with a keras.callbacks callback and restores them with model.load_weights:

from tensorflow import keras
from tensorflow.keras import layers

# rebuild exactly the same architecture that was used when the weights were saved
inputs = keras.Input(shape=(299, 299, 1))
x = layers.Flatten()(inputs)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(64, activation='relu')(x)
x = layers.Dropout(0.2)(x)
outputs = layers.Dense(6, activation='softmax')(x)
model = keras.Model(inputs, outputs, name='model-1')

# restore the weights that were saved by the callback
model.load_weights("lab4-logs/models")
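
For completeness, a minimal sketch of how the weights could have been written by a keras.callbacks.ModelCheckpoint callback (the compile settings and x_train / y_train are assumed placeholders, not from the original post):

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# save_weights_only=True writes TF-format weight files under lab4-logs/models
model_cbk = keras.callbacks.ModelCheckpoint("lab4-logs/models",
                                            save_weights_only=True)

model.fit(x_train, y_train,  # x_train / y_train stand in for the real training data
          epochs=10,
          callbacks=[model_cbk])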

While restoring the weights I ran into two problems:

1. The Jupyter kernel died

2. NotFoundError: Unsuccessful TensorSliceReader constructor: Failed to find any matching files for AAA/

After some testing I found that when restoring on a different machine, the weight folder name must be the same as the one used when saving.
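
A quick way to check whether a directory actually contains a restorable checkpoint before calling load_weights, as a minimal sketch (the folder name lab4-logs follows the example above):

latest = tf.train.latest_checkpoint("lab4-logs")
if latest is None:
    # usually means the folder name or path does not match the one used when saving
    print("No checkpoint found under lab4-logs")
else:
    model.load_weights(latest)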

