frozen_graph.py
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.framework import graph_util
from BigGAN import BigGAN
import argparse
from utils import *
from npu_bridge.estimator import npu_ops
from npu_bridge.estimator.npu.npu_config import NPURunConfig
from npu_bridge.estimator.npu.npu_estimator import NPUEstimator
from npu_bridge.estimator.npu.npu_optimizer import allreduce
from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer
from npu_bridge.hccl import hccl_ops
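
# NOTE: the npu_bridge modules above belong to Huawei's Ascend (CANN) TensorFlow
# adapter; importing them makes the NPU custom ops and the "NpuOptimizer"
# rewrite pass used in main() available. Several of the imported symbols
# (freeze_graph, NPURunConfig, NPUEstimator, allreduce, NPUDistributedOptimizer,
# hccl_ops) are not referenced in this script itself.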
"""parsing and configuration"""
def parse_args():
    desc = "Tensorflow implementation of BigGAN"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--phase', type=str, default='test', help='train or test ?')
    parser.add_argument('--dataset', type=str, default='train', help='[mnist / cifar10 / celebA]')
    parser.add_argument('--epoch', type=int, default=100, help='The number of epochs to run')
    parser.add_argument('--iteration', type=int, default=10000, help='The number of training iterations')
    # parser.add_argument('--iteration_per_loop', type=int, default=100, help='The number of sink data')
    parser.add_argument('--batch_size', type=int, default=64, help='The size of batch per gpu')
    parser.add_argument('--print_freq', type=int, default=1000, help='How often (in iterations) to output sample images')
    parser.add_argument('--save_freq', type=int, default=10000, help='How often (in iterations) to save a checkpoint')
    parser.add_argument('--g_lr', type=float, default=0.0002, help='learning rate for generator')
    parser.add_argument('--d_lr', type=float, default=0.0002, help='learning rate for discriminator')
    parser.add_argument('--beta1', type=float, default=0.0, help='beta1 for Adam optimizer')
    parser.add_argument('--beta2', type=float, default=0.9, help='beta2 for Adam optimizer')
    parser.add_argument('--z_dim', type=int, default=128, help='Dimension of noise vector')
    parser.add_argument('--sn', type=str2bool, default=True, help='using spectral norm')
    parser.add_argument('--gan_type', type=str, default='hinge', help='[gan / lsgan / wgan-gp / wgan-lp / dragan / hinge]')
    parser.add_argument('--ld', type=float, default=10.0, help='The gradient penalty lambda')
    parser.add_argument('--n_critic', type=int, default=2, help='The number of critic (discriminator) updates per generator update')
    parser.add_argument('--img_size', type=int, default=128, help='The size of image')
    parser.add_argument('--sample_num', type=int, default=64, help='The number of sample images')
    parser.add_argument('--test_num', type=int, default=1000, help='The number of images generated by the test')
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoint',
                        help='Directory name to save the checkpoints')
    parser.add_argument('--result_dir', type=str, default='results',
                        help='Directory name to save the generated images')
    parser.add_argument('--log_dir', type=str, default='logs',
                        help='Directory name to save training logs')
    parser.add_argument('--sample_dir', type=str, default='samples',
                        help='Directory name to save the samples on training')
    return check_args(parser.parse_args())
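
# Example invocation (a sketch; the directory and sizes below are assumptions
# and must match the checkpoint of the trained model being frozen):
#   python frozen_graph.py --checkpoint_dir checkpoint --img_size 128 --z_dim 128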
"""checking arguments"""
def check_args(args):
    # --checkpoint_dir
    check_folder(args.checkpoint_dir)
    # --result_dir
    check_folder(args.result_dir)
    # --log_dir
    check_folder(args.log_dir)
    # --sample_dir
    check_folder(args.sample_dir)
    # --epoch
    try:
        assert args.epoch >= 1
    except AssertionError:
        print('number of epochs must be larger than or equal to one')
    # --batch_size
    try:
        assert args.batch_size >= 1
    except AssertionError:
        print('batch size must be larger than or equal to one')
    return args
"""main"""
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()
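
    # Session configuration for the Ascend NPU (npu_bridge) backend, roughly:
    #   enable_data_pre_proc - sink dataset preprocessing to the device
    #   precision_mode       - 'allow_mix_precision' enables automatic mixed precision
    #   mix_compile_mode     - False keeps the whole graph on the NPU rather than mixing with the host
    #   use_off_line         - True runs operators on the NPU instead of the host CPU
    # Grappler's remapping pass is switched off below, as recommended when using the
    # NpuOptimizer; see the Ascend TensorFlow adapter documentation for the
    # authoritative definitions of these options.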
    config = tf.ConfigProto(allow_soft_placement=True)
    custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    custom_op.parameter_map["enable_data_pre_proc"].b = True
    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes('allow_mix_precision')
    custom_op.parameter_map["mix_compile_mode"].b = False
    custom_op.parameter_map["use_off_line"].b = True
    # custom_op.parameter_map["iterations_per_loop"].i = args.iteration_per_loop
    from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
    # open session
    with tf.Session(config=config) as sess:
        gan = BigGAN(sess, args)

        # build the graph with an unspecified batch size so the frozen
        # generator can be fed an arbitrary number of noise vectors
        gan.batch_size = None
        # The real-image placeholder created in BigGAN.build_model(),
        #   self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.img_size, self.img_size, self.c_dim], name='real_images')
        # (e.g. an input of shape [None, 224, 224, 3]), is only used for training
        # and is not needed when the graph is frozen for image generation.
        gan.build_model()

        # show network architecture
        show_all_variables()

        # if args.phase == 'train':
        #     # launch the graph in a session
        #     gan.train()
        #
        #     # visualize learned generator
        #     gan.visualize_results(args.epoch - 1)
        #
        #     print(" [*] Training finished!")
        #
        # if args.phase == 'test':
        #     gan.test()
        #     print(" [*] Test finished!")
        tf.global_variables_initializer().run()
        gan.saver = tf.train.Saver()
        could_load, checkpoint_counter = gan.load(gan.checkpoint_dir)
        # result_dir = os.path.join(self.result_dir, self.model_dir)
        # check_folder(result_dir)
        if could_load:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        # fold the restored generator weights into the graph as constants, keeping
        # only the subgraph needed to compute the output node 'generator/Tanh'
        constant_graph = graph_util.convert_variables_to_constants(
            sess, sess.graph_def, ['generator/Tanh'])
        with tf.gfile.FastGFile('BigGAN.pb', mode='wb') as f:
            f.write(constant_graph.SerializeToString())
if __name__ == '__main__':
    main()
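
# A minimal sketch (not part of the original script) of how the frozen BigGAN.pb
# could be loaded for inference; the noise placeholder name 'z:0' and the noise
# shape (1, 128) are assumptions -- inspect the graph's node names to confirm
# them before use:
#
#   import numpy as np
#   import tensorflow as tf
#
#   with tf.gfile.GFile('BigGAN.pb', 'rb') as f:
#       graph_def = tf.GraphDef()
#       graph_def.ParseFromString(f.read())
#   with tf.Graph().as_default() as graph:
#       tf.import_graph_def(graph_def, name='')
#       z = graph.get_tensor_by_name('z:0')               # assumed input name
#       fake = graph.get_tensor_by_name('generator/Tanh:0')
#       with tf.Session(graph=graph) as sess:
#           images = sess.run(fake, feed_dict={z: np.random.normal(size=(1, 128))})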