diff --git a/requirements.txt b/requirements.txt
index 241d400..fa9f4c8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,7 +20,8 @@
 requests==2.14.2
 scikit-learn==0.20.3
 scipy==1.2.1
 six==1.10.0
-torch==0.4.0
+torch>=0.4.0,<0.5.0
 torchvision==0.2.1
-
+tensorboard==2.1.0
+tensorboardX==2.0
diff --git a/src/train_new.py b/src/train_new.py
index 8792fab..fd881fd 100644
--- a/src/train_new.py
+++ b/src/train_new.py
@@ -8,6 +8,7 @@
 import torch
 import torch.nn.functional as F
 import torch.optim as optim
+from tensorboardX import SummaryWriter
 
 from earlystopping import EarlyStopping
 from sample import Sampler
@@ -47,6 +48,7 @@
 parser.add_argument('--datapath', default="data/", help="The data path.")
 parser.add_argument("--early_stopping", type=int, default=0,
                     help="The patience of earlystopping. Do not adopt the earlystopping when it equals 0.")
+parser.add_argument("--no_tensorboard", action="store_true", default=False, help="Disable writing logs to tensorboard.")
 
 # Model parameter
 parser.add_argument('--type',
@@ -152,6 +154,10 @@
 early_stopping = EarlyStopping(patience=args.early_stopping, verbose=False)
 print("Model is saving to: %s" % (early_stopping.fname))
 
+if args.no_tensorboard is False:
+    tb_writer = SummaryWriter(
+        comment=f"-dataset_{args.dataset}-type_{args.type}"
+    )
 
 def get_lr(optimizer):
     for param_group in optimizer.param_groups:
@@ -263,6 +269,13 @@
           's_time: {:.4f}s'.format(sampling_t),
           't_time: {:.4f}s'.format(outputs[5]),
           'v_time: {:.4f}s'.format(outputs[6]))
+
+    if args.no_tensorboard is False:
+        tb_writer.add_scalars('Loss', {'train': outputs[0], 'val': outputs[2]}, epoch)
+        tb_writer.add_scalars('Accuracy', {'train': outputs[1], 'val': outputs[3]}, epoch)
+        tb_writer.add_scalar('lr', outputs[4], epoch)
+        tb_writer.add_scalars('Time', {'train': outputs[5], 'val': outputs[6]}, epoch)
+
     loss_train[epoch], acc_train[epoch], loss_val[epoch], acc_val[epoch] = outputs[0], outputs[1], outputs[2], outputs[
         3]
 
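For reference, a minimal sketch of the writer usage this patch introduces, assuming tensorboardX 2.0's SummaryWriter API; the comment suffix and scalar values below are illustrative placeholders, not taken from the repository:

    from tensorboardX import SummaryWriter

    # With no logdir given, the writer logs to ./runs/<timestamp>_<host><comment>,
    # so the comment string tags each run with its dataset and model type.
    writer = SummaryWriter(comment="-dataset_cora-type_mutigcn")  # placeholder values

    # add_scalars groups several curves under one main tag, as the patch
    # does for train/val loss, accuracy, and timing at each epoch.
    writer.add_scalars('Loss', {'train': 0.71, 'val': 0.94}, global_step=0)
    writer.add_scalar('lr', 0.01, global_step=0)
    writer.close()

The resulting event files can be viewed with `tensorboard --logdir runs`; passing `--no_tensorboard` to train_new.py skips creating the writer entirely.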