conda activate ps
pip install visdom
Activate the ps environment, then install visdom inside that environment.
python -m visdom.server
Open the address printed by the server (by default http://localhost:8097) in your browser.
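If you want to verify from Python that the client can reach the server, a minimal check looks like this (assuming the server is running with the default settings on port 8097):

from visdom import Visdom

# assumes the default server address http://localhost:8097
viz = Visdom(server='http://localhost', port=8097)
assert viz.check_connection(), 'visdom server is not reachable'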
from visdom import Visdom

# create a client instance
viz = Visdom()

# create a line plot and push data to it
# y and x are the two axes; win names a window (the default window is used if omitted);
# opts carries extra information such as the title
viz.line([1, 2, 3, 4], [1, 2, 3, 4], win="train_loss", opts=dict(title='train_loss'))

# the more common pattern; loss and global_step do not exist here, this is only an illustration
# update='append' adds the new point to the existing curve instead of overwriting it
# viz.line([loss.item()], [global_step], win="train_loss", update='append')
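To see the append pattern in action without a real training loop, here is a small self-contained sketch; fake_loss is a made-up value standing in for loss.item():

import numpy as np
from visdom import Visdom

viz = Visdom()
# initialise the window with a single point, then append one point per step
viz.line([0.], [0.], win='demo_loss', opts=dict(title='demo_loss'))
for step in range(1, 100):
    fake_loss = float(np.exp(-step / 30.) + 0.05 * np.random.rand())  # stand-in for loss.item()
    viz.line([fake_loss], [step], win='demo_loss', update='append')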
The example below draws two curves in one window: each entry of Y holds one value per curve ([[y1, y2], ...]) against a shared X, and legend gives the names of the curves.
from visdom import Visdom

viz = Visdom()
viz.line([[1, 2], [5, 6]], [1, 2], win="loss_acc",
         opts=dict(title='train loss acc', legend=['loss', 'acc']))
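During training you would normally append one (loss, acc) pair per step to such a two-curve window. A minimal sketch with synthetic values (loss_value and acc_value here are made up, standing in for the real metrics):

import numpy as np
from visdom import Visdom

viz = Visdom()
viz.line([[0.0, 0.0]], [0.], win='loss_acc',
         opts=dict(title='train loss acc', legend=['loss', 'acc']))
for step in range(1, 50):
    loss_value = float(np.exp(-step / 10.))       # synthetic value, falls toward 0
    acc_value = float(1. - np.exp(-step / 10.))   # synthetic value, rises toward 1
    viz.line([[loss_value, acc_value]], [step], win='loss_acc', update='append')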
from visdom import Visdom

viz = Visdom()
# data is a batch of MNIST images; visualise the batch and the predicted labels
viz.images(data.view(-1, 1, 28, 28), win='x')
viz.text(str(pred.detach().cpu().numpy()), win='pred', opts=dict(title='pred'))
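Since data and pred come from the training script further below, the snippet above is not runnable on its own. A self-contained version with random stand-ins for the MNIST batch and the predictions could look like this:

import torch
from visdom import Visdom

viz = Visdom()
fake_batch = torch.rand(16, 1, 28, 28)     # stand-in for a real MNIST batch
fake_pred = torch.randint(0, 10, (16,))    # stand-in for logits.argmax(dim=1)
viz.images(fake_batch, win='x', opts=dict(title='input batch'))
viz.text(str(fake_pred.numpy()), win='pred', opts=dict(title='pred'))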
The plots update live while training runs, producing an animated effect. The complete training script is shown below.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from visdom import Visdom

batch_size = 200
learning_rate = 0.01
epochs = 10

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       # transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       # transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)


class MLP(nn.Module):

    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 10),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        x = self.model(x)
        return x


device = torch.device('cuda:0')
net = MLP().to(device)
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss().to(device)

viz = Visdom()

viz.line([0.], [0.], win='train_loss', opts=dict(title='train loss'))
viz.line([[0.0, 0.0]], [0.], win='test',
         opts=dict(title='test loss&acc.', legend=['loss', 'acc.']))
global_step = 0

for epoch in range(epochs):

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28 * 28)
        data, target = data.to(device), target.cuda()

        logits = net(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        # print(w1.grad.norm(), w2.grad.norm())
        optimizer.step()

        global_step += 1
        viz.line([loss.item()], [global_step], win='train_loss', update='append')

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    test_loss = 0
    correct = 0
    for data, target in test_loader:
        data = data.view(-1, 28 * 28)
        data, target = data.to(device), target.cuda()
        logits = net(data)
        test_loss += criteon(logits, target).item()

        pred = logits.argmax(dim=1)
        correct += pred.eq(target).float().sum().item()

    viz.line([[test_loss, correct / len(test_loader.dataset)]],
             [global_step], win='test', update='append')
    viz.images(data.view(-1, 1, 28, 28), win='x')
    viz.text(str(pred.detach().cpu().numpy()), win='pred', opts=dict(title='pred'))

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
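Note that the visdom server (python -m visdom.server) must already be running before this script is started, otherwise the plots cannot be created. Once training runs, the train_loss window gains one point per batch, and the test window gains one (loss, acc) pair per epoch, together with the current test batch and its predicted labels.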
This concludes the article on installing, starting, and using visdom with PyTorch. For more on using visdom with PyTorch, please search 腳本之家's earlier articles or continue browsing the related articles below, and we hope you will keep supporting 腳本之家!