1 Basic torch usage
import torch
import numpy as np
# define a 3x2 tensor; torch.Tensor defaults to FloatTensor, which is where the trailing dots on the printed elements come from
a = torch.Tensor([[2,2],[3,4],[4,5]])
# inspect the tensor
print('a is: {}'.format(a))
# show the size
print('a size is {}'.format(a.size()))  # A: via the size() method
print('a size is {}'.format(a.shape))   # B: via the shape attribute (equivalent)
# get the data type
print(a.type())
# get the number of dimensions
print(a.dim())
# get the number of elements
print(a.numel())
# index an element and modify it
a[1,1] = 100
print('after changing position (1,1): {}, a[1,1] is {}'.format(a, a[1,1]))
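# A hedged aside (not in the original): since torch.Tensor always yields a
# FloatTensor, integer input gets cast to float; for true integer storage,
# construct a LongTensor explicitly.
b = torch.LongTensor([[2, 2], [3, 4], [4, 5]])
print(b.type())  # torch.LongTensor; printed values carry no trailing dots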
2 Converting between torch and numpy
np_t = np.random.randn(10,20)
print(torch.Tensor(np_t).dim())  # 2: both numpy dimensions are kept
# numpy array to tensor
torch1 = torch.Tensor(np_t)      # copies the data and casts to float32
torch2 = torch.from_numpy(np_t)  # shares memory with the numpy array
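# A minimal sketch of the practical difference: torch.Tensor(np_t) copies the
# data (casting float64 to float32), while torch.from_numpy(np_t) shares
# memory with the numpy array, so writes on one side show up on the other.
np_t[0, 0] = 99.0
print(torch1[0, 0])  # independent copy, unchanged
print(torch2[0, 0])  # 99.0, because the storage is shared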
# tensor to numpy array
# a CPU tensor converts directly
np1 = torch1.numpy()
# a GPU tensor must be moved to the CPU first (.cpu() is a no-op here, since torch2 already lives on the CPU)
np2 = torch2.cpu().numpy()
# put a tensor onto the GPU
# A: set a CUDA tensor type and convert
dtype = torch.cuda.FloatTensor  # the CUDA float type
gput1 = torch.randn(10,20).type(dtype)  # moved onto the GPU
# B: call .cuda() with a device index
gput2 = torch.randn(10,20).cuda(0)  # onto the first GPU
gput3 = torch.randn(10,20).cuda(1)  # onto the second GPU
# note: no GPU is attached in this Colab session, so the calls above fail here; run them once a GPU is available
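# A runnable guard (a sketch, using torch.cuda.is_available()) that keeps the
# script working on CPU-only machines such as this session:
if torch.cuda.is_available():
    gput4 = torch.randn(10, 20).cuda()
    print(gput4.type())  # torch.cuda.FloatTensor
else:
    print('no CUDA device found, skipping the GPU examples')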
3 Basic Tensor operations
x = torch.ones(2,2)
print(x , x.type())
# convert to integer (LongTensor, int64)
x = x.long()
print(x)
# convert back to float
x = x.float()
print(x)
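# A hedged aside: .type() also accepts a target type for generic conversion,
# e.g. to double precision, then back to float32 for the steps below.
x = x.type(torch.DoubleTensor)
print(x.type())  # torch.DoubleTensor
x = x.float()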
x = torch.randn(4,3)
print(x)
# take the max along dim=1, i.e. over each row (dim=0 would work over each column)
max_value, max_idx = torch.max(x, dim=1)
# the max value of each row
print(max_value)
# the index of the max within each row
print(max_idx)
# sum x along each row
sum_x = torch.sum(x,dim=1)
print(sum_x)
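# For contrast (a small sketch): dim=0 runs the same reductions over columns.
col_max, col_idx = torch.max(x, dim=0)  # max of each column
print(col_max)
print(torch.sum(x, dim=0))  # column-wise sum, one entry per column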
x = torch.randn(4,3)
# adding dimensions
print(x.shape)
print(x)
x = x.unsqueeze(0) # insert a new dimension at position 0
print(x.shape)
print(x)
x = x.unsqueeze(1) # insert a new dimension at position 1
print(x.shape)
print(x)
x = x.squeeze(0) # remove dimension 0 (possible because its size is 1)
print(x.shape)
print(x)
x = x.squeeze() # drop every dimension of size 1
print(x.shape)
print(x)
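# A small check (not in the original): squeeze(dim) only removes the given
# dimension when its size is 1, otherwise it is a no-op.
y = torch.randn(1, 4, 1)
print(y.squeeze(1).shape)  # torch.Size([1, 4, 1]): dim 1 has size 4, kept
print(y.squeeze(0).shape)  # torch.Size([4, 1])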
# permuting and swapping dimensions
x = torch.randn(3,4,5)
print(x.shape)
# reorder the dimensions
x = x.permute(1,0,2) # position 0 takes the old dim 1, position 1 takes the old dim 0
print(x.shape)
# swap two dimensions
x = x.transpose(0,2) # swap the first and third dimensions
print(x.shape)
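# Design note, as a sketch: transpose swaps exactly two dimensions, while
# permute reorders all of them at once; for a plain swap the two agree.
z = torch.randn(3, 4, 5)
print(z.permute(2, 1, 0).shape)  # torch.Size([5, 4, 3])
print(z.transpose(0, 2).shape)   # torch.Size([5, 4, 3]), same swap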
# reshaping
x = torch.randn(3,4,5)
x = x.view(4,-1) # reshape to 4 rows; -1 lets torch infer the remaining size (15 here)
print(x.shape)
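# A hedged gotcha worth knowing: view needs contiguous memory, so after a
# transpose or permute you usually call .contiguous() first.
t = torch.randn(3, 4).transpose(0, 1)    # result is non-contiguous
print(t.contiguous().view(2, -1).shape)  # torch.Size([2, 6])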
# in-place operations (trailing underscore), which avoid allocating new memory
x = torch.ones(3,3)
print(x.shape)
x.unsqueeze_(0)
print(x.shape)
x.transpose_(2,0)
print(x.shape)
x = torch.ones(3, 3)
y = torch.ones(3, 3)
print(x)
# in-place add
x.add_(y)
print(x)
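# For contrast (a sketch): the out-of-place add returns a new tensor and
# leaves x untouched.
z = x.add(y)  # x still holds the values from x.add_(y) above
print(z)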
x = torch.ones(4,4)
x[1:3,1:3] = 2 # slice assignment: set the inner 2x2 block to 2
print(x)
4 Variable
from torch.autograd import Variable
# .data holds the underlying tensor
# .grad_fn records the operation that produced the Variable
# .grad holds the gradient
x_tensor = torch.randn(10,5)
y_tensor = torch.randn(10,5)
x = Variable(x_tensor,requires_grad=True)
y = Variable(y_tensor,requires_grad=True)
z = torch.sum(x+y)
print(z.data)    # the tensor value
print(z.grad_fn) # shows that z was produced by a sum
x = torch.FloatTensor([2])
x = Variable(x, requires_grad=True)
y = x**2
y.backward()
print(x.grad)    # dy/dx = 2x = 4 at x = 2
print(x.data)
print(x.grad_fn) # None: x is a leaf Variable, not the result of an operation
### Autograd
import torch
from torch.autograd import Variable
# autograd computes derivatives automatically
x = Variable(torch.FloatTensor([2]),requires_grad=True)
y = x+2
z = y**2 + 3
print(z)
z.backward()
print(x.grad) # dz/dx = 2*(x+2) = 8 at x = 2
# automatic differentiation in a more complex case: matrix multiplication
x = Variable(torch.randn(10,20),requires_grad=True)
y = Variable(torch.randn(10,20),requires_grad=True)
w = Variable(torch.randn(20,20),requires_grad=True)
out = torch.mean(y - torch.matmul(x,w)) # torch.matmul is matrix multiplication; the inner dimensions must match
out.backward()
print(x.grad)
print(y.grad)
print(w.grad)
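# A hedged final note: .grad accumulates across backward calls, so training
# loops zero it between steps; a minimal sketch with the Variable-era API:
out2 = torch.mean(y - torch.matmul(x, w))
out2.backward()
print(x.grad)        # twice the single-pass gradient, since grads accumulated
x.grad.data.zero_()  # reset in place before the next backward pass
print(x.grad)        # all zeros again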