TensorBoard + PyTorch usage tutorial

Using TensorBoard to log loss and evaluation-metric curves
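
The core workflow is: create a SummaryWriter, call add_scalar once per epoch (or per step) with a tag, a value and a global_step, and close the writer. A minimal self-contained sketch first (the log path and tag names here are illustrative, not taken from the project code below):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="runs/demo")        # each run gets its own event-file directory

for epoch in range(10):
    # placeholder values standing in for real training losses / validation metrics
    train_loss = 1.0 / (epoch + 1)
    val_metric = 20.0 + epoch
    writer.add_scalar("Loss/train", train_loss, global_step=epoch)
    writer.add_scalar("Metric/val", val_metric, global_step=epoch)

writer.close()                                     # flush remaining events to disk

The project training script below follows the same pattern, just inside a full training loop.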

import datetime
import random

import numpy as np
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter

torch.backends.cudnn.benchmark = True

######### Set Seeds ###########
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)


def train():

    # --------------------------------------------------------------
    # give each run its own timestamped log directory so runs do not overwrite each other
    time_stamp = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.datetime.now())
    comment = f'batch_size={config.BATCH_SIZE}_lr={config.LR}'
    writer = SummaryWriter(log_dir="workdir/" + time_stamp, comment=comment)
    # --------------------------------------------------------------

    # start_epoch, EPOCH, net, optimizer, scheduler, config are defined elsewhere in the project
    for epoch in range(start_epoch, EPOCH):
        net.train()
        losses_epoch = 0.0
        losses_moco_epoch = 0.0
        for step, (x, y, path_code) in enumerate(train_loader, 0):
            losses = 0.0
            losses_moco = 0.0
            for index in range(1, 2):
                tmp = torch.zeros(size=(x.size(0), 3, 3, x.size(3), x.size(4)))
                tmp[:, :, :, :, :] = x[:, index - 1:index + 2, :, :, :]

                tmp = tmp.cuda()
                tmp_y = torch.zeros(size=(y.size(0), 3, y.size(3), y.size(4)))
                tmp_y[:, :, :, :] = y[:, index, :, :, :]
                tmp_y = tmp_y.cuda()

                y_hat, output, target = net(tmp)
                # the loss terms (ssimlossprint, constrastlossprint, SSIM_LOSS, MOCO_LOSS)
                # are computed from the network outputs; that code is omitted in this snippet
                losses_moco += constrastlossprint.item()
                losses += ssimlossprint.item()  # + vgglossprint.item()
            ### overall loss
            overall_loss = SSIM_LOSS + MOCO_LOSS
            optimizer.zero_grad()
            ### backward loss
            overall_loss.backward()
            ### update parameters
            optimizer.step()
            # profiler.step()

            losses /= index  # average per-frame loss within one video
            losses_moco /= index
            # print('losses_per_%d_video: ' % step, losses)
            losses_epoch += losses  # sum of the per-video average losses
            losses_moco_epoch += losses_moco


        writer.add_scalar('SSIM Loss', losses_epoch, global_step=epoch)
        writer.add_scalar('Moco Loss', losses_moco_epoch, global_step=epoch)

        scheduler.step()


if __name__ == '__main__':
    train()
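
After (or during) training, the curves can be viewed by pointing TensorBoard at the log directory used above and opening the Scalars tab in a browser:

    tensorboard --logdir workdir --port 6006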



Using torch.profiler.profile (PyTorch 1.8+)

with torch.profiler.profile(
    activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
    schedule=torch.profiler.schedule(
        wait=2,      # skip the first 2 steps
        warmup=2,    # warm up for 2 steps (profiled, but results discarded)
        active=6,    # record the next 6 steps
        repeat=1),   # run this cycle once
    on_trace_ready=torch.profiler.tensorboard_trace_handler('./workdir'),
    # with_stack=True
) as profiler:
    for step, (x, y, path_code) in enumerate(train_loader, 0):
        xxxxxxx
        ### overall loss
        overall_loss = SSIM_LOSS + MOCO_LOSS
        optimizer.zero_grad()
        ### backward loss
        overall_loss.backward()
        ### update parameters
        optimizer.step()
        profiler.step()  # Note: must be called once per iteration so the schedule advances
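
tensorboard_trace_handler writes the trace files into ./workdir, and they show up in TensorBoard's profiler view once the torch-tb-profiler plugin is installed (pip install torch-tb-profiler, then tensorboard --logdir ./workdir). For reference, a minimal runnable sketch with the same schedule, using a toy model and random data in place of the project's network and data loader (assumes a CUDA GPU, as the rest of the tutorial does):

import torch
from torch import nn

model = nn.Linear(128, 10).cuda()                      # toy model in place of the real network
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

with torch.profiler.profile(
    activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
    schedule=torch.profiler.schedule(wait=2, warmup=2, active=6, repeat=1),
    on_trace_ready=torch.profiler.tensorboard_trace_handler('./workdir'),
) as profiler:
    for step in range(12):                             # needs at least wait + warmup + active steps
        x = torch.randn(32, 128, device='cuda')        # random batch in place of train_loader
        loss = model(x).sum()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        profiler.step()                                # advance the profiler schedule each step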
