fix train.py
parent
093818a947
commit
a52efec34e

@@ -23,7 +23,7 @@ logging.basicConfig(

 def time_zone(sec, fmt):
-    real_time = datetime.datetime.now() + datetime.timedelta(hours=8)
+    real_time = datetime.datetime.now()
     return real_time.timetuple()
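For context, a converter with this `(sec, fmt)` shape is typically wired into logging via `logging.Formatter.converter`; the sketch below shows that wiring end to end. The assignment to `logging.Formatter.converter` and the format string are assumptions for illustration, since the surrounding file is not named in this hunk:

    import datetime
    import logging

    def time_zone(sec, fmt):
        # Both arguments are ignored; as in the patched line above, the
        # function simply returns the current local time as a timetuple,
        # instead of hard-coding a UTC+8 offset.
        real_time = datetime.datetime.now()
        return real_time.timetuple()

    # Formatter consults 'converter' when rendering %(asctime)s, so every
    # log line's timestamp goes through time_zone.
    logging.Formatter.converter = time_zone
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
    logging.info("timestamps now follow the machine's local clock")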

tools/train.py (106 lines changed)

@@ -13,12 +13,13 @@
 # limitations under the License.

 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import paddle.fluid as fluid
+import paddle
+from paddle.distributed import ParallelEnv
 from ppcls.data import Reader
 from ppcls.utils.config import get_config
 from ppcls.utils.save_load import init_model, save_model
 from ppcls.utils import logger
 import program
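The import changes track the move from fluid's scoped dygraph guard to globally enabled imperative mode. A minimal sketch of that switch, mirroring the calls this diff introduces (the CUDA-availability fallback is an assumption for illustration, not code from this file):

    import paddle
    from paddle.distributed import ParallelEnv

    # Pick the device the way the patched main() does below.
    if paddle.is_compiled_with_cuda():
        place = paddle.CUDAPlace(ParallelEnv().dev_id)
    else:
        place = paddle.CPUPlace()

    # Old style (removed): imperative ops had to run inside
    #     with fluid.dygraph.guard(place): ...
    # New style (paddle 2.0 alpha): enable dygraph once, globally.
    paddle.disable_static(place)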

@@ -53,69 +54,66 @@ def main(args):
     # assign the place
     use_gpu = config.get("use_gpu", True)
     if use_gpu:
-        gpu_id = fluid.dygraph.ParallelEnv().dev_id
-        place = fluid.CUDAPlace(gpu_id)
+        gpu_id = ParallelEnv().dev_id
+        place = paddle.CUDAPlace(gpu_id)
     else:
-        place = fluid.CPUPlace()
+        place = paddle.CPUPlace()

     use_data_parallel = int(os.getenv("PADDLE_TRAINERS_NUM", 1)) != 1
     config["use_data_parallel"] = use_data_parallel

-    with fluid.dygraph.guard(place):
-        net = program.create_model(config.ARCHITECTURE, config.classes_num)
-        optimizer = program.create_optimizer(
-            config, parameter_list=net.parameters())
-
-        if config["use_data_parallel"]:
-            strategy = fluid.dygraph.parallel.prepare_context()
-            net = fluid.dygraph.parallel.DataParallel(net, strategy)
-
-        # load model from checkpoint or pretrained model
-        init_model(config, net, optimizer)
-
-        train_dataloader = program.create_dataloader()
-        train_reader = Reader(config, 'train')()
-        train_dataloader.set_sample_list_generator(train_reader, place)
-
-        if config.validate:
-            valid_dataloader = program.create_dataloader()
-            valid_reader = Reader(config, 'valid')()
-            valid_dataloader.set_sample_list_generator(valid_reader, place)
-
-        best_top1_acc = 0.0  # best top1 acc record
-        for epoch_id in range(config.epochs):
-            net.train()
-            # 1. train with train dataset
-            program.run(train_dataloader, config, net, optimizer, epoch_id,
-                        'train')
-
-            if not config["use_data_parallel"] or fluid.dygraph.parallel.Env(
-            ).local_rank == 0:
-                # 2. validate with validate dataset
-                if config.validate and epoch_id % config.valid_interval == 0:
-                    net.eval()
-                    top1_acc = program.run(valid_dataloader, config, net, None,
-                                           epoch_id, 'valid')
-                    if top1_acc > best_top1_acc:
-                        best_top1_acc = top1_acc
-                        message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
-                            best_top1_acc, epoch_id)
-                        logger.info("{:s}".format(
-                            logger.coloring(message, "RED")))
-                        if epoch_id % config.save_interval == 0:
-                            model_path = os.path.join(
-                                config.model_save_dir,
-                                config.ARCHITECTURE["name"])
-                            save_model(net, optimizer, model_path,
-                                       "best_model")
-
-                # 3. save the persistable model
-                if epoch_id % config.save_interval == 0:
-                    model_path = os.path.join(config.model_save_dir,
-                                              config.ARCHITECTURE["name"])
-                    save_model(net, optimizer, model_path, epoch_id)
+    paddle.disable_static(place)
+
+    net = program.create_model(config.ARCHITECTURE, config.classes_num)
+    optimizer = program.create_optimizer(
+        config, parameter_list=net.parameters())
+
+    if config["use_data_parallel"]:
+        strategy = paddle.distributed.init_parallel_env()
+        net = paddle.DataParallel(net, strategy)
+
+    # load model from checkpoint or pretrained model
+    init_model(config, net, optimizer)
+
+    train_dataloader = program.create_dataloader()
+    train_reader = Reader(config, 'train')()
+    train_dataloader.set_sample_list_generator(train_reader, place)
+
+    if config.validate:
+        valid_dataloader = program.create_dataloader()
+        valid_reader = Reader(config, 'valid')()
+        valid_dataloader.set_sample_list_generator(valid_reader, place)
+
+    best_top1_acc = 0.0  # best top1 acc record
+    for epoch_id in range(config.epochs):
+        net.train()
+        # 1. train with train dataset
+        program.run(train_dataloader, config, net, optimizer, epoch_id,
+                    'train')
+
+        if not config["use_data_parallel"] or ParallelEnv().local_rank == 0:
+            # 2. validate with validate dataset
+            if config.validate and epoch_id % config.valid_interval == 0:
+                net.eval()
+                top1_acc = program.run(valid_dataloader, config, net, None,
+                                       epoch_id, 'valid')
+                if top1_acc > best_top1_acc:
+                    best_top1_acc = top1_acc
+                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
+                        best_top1_acc, epoch_id)
+                    logger.info("{:s}".format(logger.coloring(message, "RED")))
+                    if epoch_id % config.save_interval == 0:
+                        model_path = os.path.join(config.model_save_dir,
+                                                  config.ARCHITECTURE["name"])
+                        save_model(net, optimizer, model_path, "best_model")
+
+            # 3. save the persistable model
+            if epoch_id % config.save_interval == 0:
+                model_path = os.path.join(config.model_save_dir,
+                                          config.ARCHITECTURE["name"])
+                save_model(net, optimizer, model_path, epoch_id)


 if __name__ == '__main__':
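The parallel setup above replaces fluid.dygraph.parallel.prepare_context() with paddle.distributed.init_parallel_env(), and the rank guard ensures only one process validates and saves. A self-contained sketch of that pattern, using the same calls as the diff; the Linear layer is a hypothetical stand-in for the real PaddleClas model:

    import os

    import paddle
    from paddle.distributed import ParallelEnv

    paddle.disable_static()
    net = paddle.nn.Linear(8, 2)  # stand-in for program.create_model(...)

    # The distributed launcher signals multi-process training through
    # PADDLE_TRAINERS_NUM, exactly as in the patched main().
    if int(os.getenv("PADDLE_TRAINERS_NUM", 1)) != 1:
        strategy = paddle.distributed.init_parallel_env()
        net = paddle.DataParallel(net, strategy)

    # Only local rank 0 validates and saves, so checkpoints are written once.
    if ParallelEnv().local_rank == 0:
        print("chief process: would run validation / save_model here")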