"""Minimal PyTorch autograd demo.

Builds a small computation graph, backpropagates a scalar loss, then shows
the vector-Jacobian product form of ``backward`` for a non-scalar output.
"""
import torch
from torch.autograd import Variable  # kept for compatibility; no longer needed

# Tensors track gradients directly since PyTorch 0.4 — the old
# ``Variable`` wrapper is deprecated and unnecessary.
x = torch.ones(2, 2, requires_grad=True)
print(x)

y = x + 2
print(y)

z = y * y * 3
out = z.mean()
print(z)
print(out)

# Backpropagate the scalar: d(out)/dx = 3 * 2 * (x + 2) / 4 = 4.5 at x == 1.
out.backward()
print(x.grad)

a = torch.randn(3, requires_grad=True)
b = a * 2
# Keep doubling until the L2 norm exceeds 1000.  ``detach()`` (instead of
# the deprecated ``.data``) reads the value without touching the graph.
while b.detach().norm() < 1000:
    b = b * 2
print(b)

# ``b`` is non-scalar, so backward() needs upstream gradients: this computes
# the vector-Jacobian product grad^T * (db/da).
gradients = torch.tensor([0.1, 1.0, 0.0001])
b.backward(gradients)
print(a.grad)