1.1 Data Basics
import torch
torch.cuda.is_available()
True
# An uninitialized matrix:
# its values are whatever happened to be in the allocated memory
x = torch.empty(5,3)
print(x)
tensor([[1.6880e+25, 2.5226e-18, 6.6645e-10],
        [4.1575e+21, 1.3294e-08, 2.0773e+20],
        [1.6536e-04, 1.0016e-11, 8.3391e-10],
        [2.1029e+20, 2.0314e+20, 3.1369e+27],
        [7.0800e+31, 3.1095e-18, 1.8590e+34]])
# Construct a randomly initialized matrix
x = torch.rand(5,3)
x
tensor([[0.3129, 0.8714, 0.8079],
        [0.4722, 0.3522, 0.3068],
        [0.9920, 0.0171, 0.6463],
        [0.1151, 0.7443, 0.1300],
        [0.2816, 0.7904, 0.1833]])
# Construct a matrix filled with zeros, with dtype long
x = torch.zeros(5, 3, dtype=torch.long)
x
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
# Construct a tensor directly from data (a Python list)
x = torch.tensor([5.5, 3])
x
tensor([5.5000, 3.0000])
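Note that torch.tensor copies the data it is given and infers the dtype from the values; a dtype can also be passed explicitly. A small sketch for illustration (the variable names are just for this example):

# integer data is inferred as int64, floating-point data as float32
a = torch.tensor([1, 2, 3])
b = torch.tensor([[1, 2], [3, 4]], dtype=torch.float)  # force float32
print(a.dtype, b.dtype)  # torch.int64 torch.float32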
# Create a new tensor of ones based on an existing tensor; it reuses x's properties (device, dtype) unless overridden
x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes
print(x)
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
# Create a tensor with the same shape, filled with samples from a standard normal distribution (mean 0, std 1)
x = torch.randn_like(x, dtype=torch.float) # override dtype!
print(x) # result has the same size
tensor([[0.1310, 0.8429, 0.9671],
        [0.4961, 0.4118, 0.5708],
        [0.9019, 0.1656, 0.9630],
        [0.1138, 0.5194, 0.2060],
        [0.0544, 0.8853, 0.4521]])
# Create a tensor with the same shape, filled with uniform random values in [0, 1)
x = torch.rand_like(x, dtype=torch.float) # override dtype!
print(x) # result has the same size
tensor([[0.2949, 0.5003, 0.8243],
        [0.0197, 0.8175, 0.7986],
        [0.2791, 0.1747, 0.9388],
        [0.6410, 0.7757, 0.9517],
        [0.5885, 0.8757, 0.0301]])
# Get the size of the tensor
print(x.size())
torch.Size([5, 3])
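torch.Size is a subclass of the Python tuple, so it supports tuple operations such as unpacking, and x.shape is an equivalent attribute. A quick sketch:

rows, cols = x.size()   # torch.Size behaves like a tuple
print(rows, cols)       # 5 3
print(x.shape)          # same as x.size(): torch.Size([5, 3])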
# Addition, method 1: returns a new tensor
y = torch.rand(5, 3)
print(x + y)
# Addition, method 2: also returns a new tensor
print(torch.add(x, y))
tensor([[0.8205, 1.1127, 0.9667],
        [1.0123, 1.6212, 1.7500],
        [0.7720, 0.5131, 1.8308],
        [1.1216, 1.0681, 1.8199],
        [1.4038, 1.3346, 0.7781]])
tensor([[0.8205, 1.1127, 0.9667],
        [1.0123, 1.6212, 1.7500],
        [0.7720, 0.5131, 1.8308],
        [1.1216, 1.0681, 1.8199],
        [1.4038, 1.3346, 0.7781]])
# Addition, method 3: write the result into a given output tensor
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
tensor([[0.8205, 1.1127, 0.9667],
        [1.0123, 1.6212, 1.7500],
        [0.7720, 0.5131, 1.8308],
        [1.1216, 1.0681, 1.8199],
        [1.4038, 1.3346, 0.7781]])
# Addition, method 4: in-place addition; no new tensor is created, but y itself is modified
y.add_(x)
print(y)
tensor([[0.8205, 1.1127, 0.9667],
        [1.0123, 1.6212, 1.7500],
        [0.7720, 0.5131, 1.8308],
        [1.1216, 1.0681, 1.8199],
        [1.4038, 1.3346, 0.7781]])
# Transpose the matrix in place
y.t_()
tensor([[0.8205, 1.0123, 0.7720, 1.1216, 1.4038],
        [1.1127, 1.6212, 0.5131, 1.0681, 1.3346],
        [0.9667, 1.7500, 1.8308, 1.8199, 0.7781]])
# Copy the values of another tensor into x in place
# Any operation that mutates a tensor in place ends with an underscore _
x.copy_(y.t_())
tensor([[0.8205, 1.1127, 0.9667],
        [1.0123, 1.6212, 1.7500],
        [0.7720, 0.5131, 1.8308],
        [1.1216, 1.0681, 1.8199],
        [1.4038, 1.3346, 0.7781]])
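To make the underscore convention concrete, most operations come in an out-of-place form that returns a new tensor and an in-place form with a trailing underscore that modifies the tensor itself. A minimal sketch (the names a and b are just for illustration):

a = torch.ones(2, 2)
b = a.add(1)    # out-of-place: a is unchanged, b is a new tensor
a.add_(1)       # in-place: a itself is modified
print(a, b)     # both now hold 2s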
# NumPy-style indexing: all rows, column index 1 (the second column)
print(x[:, 1])
tensor([1.1127, 1.6212, 0.5131, 1.0681, 1.3346])
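Indexing follows NumPy conventions: the first index selects rows, the second selects columns, and counting starts at 0. A short sketch on the same 5x3 tensor:

print(x[1, :])    # row at index 1 (the second row)
print(x[:, 1])    # column at index 1 (the second column)
print(x[0, 2])    # single element: row 0, column 2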
# Reshape with view
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # -1 means this dimension is inferred from the others
print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
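Note that view returns a tensor that shares the same underlying storage as the original, so modifying one changes the other, and the total number of elements must stay the same. A small sketch to illustrate (the names a and b are only for this example):

a = torch.zeros(4, 4)
b = a.view(16)      # same 16 elements, different shape
b[0] = 7
print(a[0, 0])      # tensor(7.) -- a sees the change because storage is shared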
# Convert a one-element tensor to a Python number
x = torch.randn(1)
print(x)
print(x.item())
tensor([1.2919])
1.291871190071106
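.item() only works for tensors with exactly one element; for larger tensors, .tolist() converts the values to a nested Python list. A quick sketch:

t = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print(t.tolist())      # [[1.0, 2.0], [3.0, 4.0]]
# t.item() would raise an error here because t has more than one element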
# Convert a Torch tensor to a NumPy array
# The two objects share the same underlying memory (on the CPU), so changing one changes the other
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
a.add_(1)
print(a)
print(b)
tensor([1., 1., 1., 1., 1.])
[1. 1. 1. 1. 1.]
tensor([2., 2., 2., 2., 2.])
[2. 2. 2. 2. 2.]
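This zero-copy sharing only applies to tensors on the CPU; a tensor living on the GPU has to be moved back to the CPU before calling .numpy(). A brief sketch, guarded so it only runs when CUDA is available (the names are just for illustration):

if torch.cuda.is_available():
    g = torch.ones(3, device="cuda")
    arr = g.cpu().numpy()   # copy back to host memory first, then convert
    print(arr)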
# Convert a NumPy array to a Torch tensor
# They also share memory, so changes to the array show up in the tensor automatically
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
# CharTensor does not support conversion to NumPy
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
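Because NumPy arrays default to float64, torch.from_numpy keeps that dtype (hence dtype=torch.float64 above); if float32 is preferred, convert afterwards. A small sketch:

a64 = torch.from_numpy(np.ones(3))   # dtype=torch.float64, shares memory with the array
a32 = a64.float()                    # dtype=torch.float32, this is a copy
print(a64.dtype, a32.dtype)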
# The following code runs only when a CUDA-capable GPU build of PyTorch is available
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # create a tensor directly on the GPU
    x = x.to(device)                       # move x to the GPU; equivalent to .to("cuda")
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # .to() can also change the dtype at the same time
tensor([1.1416, 2.4353], device='cuda:0')
tensor([1.1416, 2.4353], dtype=torch.float64)
print(y)
tensor([1., 1.], device='cuda:0')
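Every tensor records the device it lives on, and .cpu() / .cuda() are shorthands for the corresponding .to(...) calls. A brief sketch, again guarded by the CUDA check:

if torch.cuda.is_available():
    print(z.device)        # cuda:0
    z_cpu = z.cpu()        # same as z.to("cpu")
    print(z_cpu.device)    # cpu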