文章目录
- 1. PytorchTensor base
- 1.1 torch.tensor
- 1.2 torch.Tensor
- 1.3 torch.empty
- 1.4 torch.zeros
- 1.5 torch.zeros_like
- 1.6 torch.eye
- 1.7 torch.ones
- 1.8 torch.ones_like
- 1.9 torch.rand
- 1.10 torch.arange
- 1.11 torch.linspace
- 1.12 torch.normal
- 1.13 tensor.uniform_
- 1.14 torch.randperm
- 2. Tensor Property
-
- 3. Tensor operator
- 3.1 加法运算
- 3.2 减法运算
- 3.3 乘法运算
- 3.4 除法运算
- 3.5 矩阵运算
- 3.6 高维tensor计算
- 3.7 torch.pow
- 3.8 torch.exp
- 3.9 torch.log
- 3.10 torch.sqrt
1. PytorchTensor base
1.1 torch.tensor
import torch
for module in torch,:
print(module.__name__, module.__version__)
torch 1.9.1+cpu
a = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(a)
print(a.type())
tensor([[1, 2, 3],
[4, 5, 6]])
torch.LongTensor
1.2 torch.Tensor
# 不指定数据时, 返回未初始化的内存数据(值不确定, 并非随机分布)
b = torch.Tensor(2, 2)
print(b)
tensor([[4.5001e-39, 4.7755e-39],
[5.1429e-39, 5.9694e-39]])
# 使用tuple创建
d = torch.tensor(((1, 2), (3, 4)))
print(d.type())
print(d.type_as(a))
torch.LongTensor
tensor([[1, 2],
[3, 4]])
1.3 torch.empty
d = torch.empty(2, 3)
print(d)
print(d.type())
print(d.type_as(a))
tensor([[2.1762e-04, 1.0529e-11, 1.3721e-05],
[6.5558e-10, 2.7594e-06, 3.1948e+21]])
torch.FloatTensor
tensor([[ 0, 0, 0],
[ 0, 0, -9223372036854775808]])
1.4 torch.zeros
a = torch.tensor([[1, 2, 3], [4, 5, 6]])
d = torch.zeros(2, 3)
print(d.type())
# 将 d 的类型转换为 tensor a的类型
print(d.type_as(a).type())
torch.FloatTensor
torch.LongTensor
1.5 torch.zeros_like
d = torch.Tensor(2, 3)
print(d)
d = torch.zeros_like(d)
print(d)
print(d.type())
tensor([[0.0000e+00, 0.0000e+00, 1.7661e-04],
[2.1254e-07, 5.3736e-05, 3.3243e+21]])
tensor([[0., 0., 0.],
[0., 0., 0.]])
torch.FloatTensor
1.6 torch.eye
d = torch.eye(2, 2)
print(d.type())
print(d.type_as(a))
torch.FloatTensor
tensor([[1, 0],
[0, 1]])
1.7 torch.ones
d = torch.ones(2, 2)
print(d.type())
print(d.type_as(a))
torch.FloatTensor
tensor([[1, 1],
[1, 1]])
1.8 torch.ones_like
d = torch.tensor([1, 2, 3])
d = torch.ones_like(d)
print(d.type())
print(d.type_as(a))
torch.LongTensor
tensor([1, 1, 1])
1.9 torch.rand
d = torch.rand(2, 3)
print(d.type())
print(d)
torch.FloatTensor
tensor([[0.5115, 0.9366, 0.6257],
[0.7940, 0.5914, 0.6784]])
1.10 torch.arange
- 生成指定范围(步长)的数据, 区间为左闭右开 [start, end), 不包含 end
- arange(start, end, step)
d = torch.arange(2, 10, 2)
print(d.type())
print(d.type_as(a))
torch.LongTensor
tensor([2, 4, 6, 8])
1.11 torch.linspace
- 生成等间隔的数据
- linspace(start, end, steps) : steps 为生成的元素个数, 并非步长
d = torch.linspace(10, 2, 3)
print(d.type())
print(d.type_as(a))
torch.FloatTensor
tensor([10, 6, 2])
1.12 torch.normal
- 生成正态分布的数据
- normal(mean=均值, std=标准差, size=(2, 3), out=输出到指定tensor)
dd = torch.normal(mean=0, std=1, size=(2, 3), out=b)
print(b)
print(dd)
tensor([[ 0.2942, -0.3772, 0.7297],
[-0.5637, -0.6795, 1.2599]])
tensor([[ 0.2942, -0.3772, 0.7297],
[-0.5637, -0.6795, 1.2599]])
d = torch.normal(mean=torch.rand(5), std=torch.rand(5))
print(d.type())
print(d)
torch.FloatTensor
tensor([ 0.3071, 0.7098, 1.1409, -0.0179, 0.0629])
1.13 tensor.uniform_
- 生成均匀分布的数据
- uniform_(-1, 1) : 生成-1 到 1 之间均匀分布的数据
d = torch.Tensor(2, 2)
print(d)
print(d.uniform_(-1, 1))
print(d.type())
tensor([[1.5413e-01, nan],
[8.4078e-45, 0.0000e+00]])
tensor([[-0.3093, 0.1627],
[ 0.3768, -0.6339]])
torch.FloatTensor
1.14 torch.randperm
d = torch.randperm(10)
print(d.type())
print(d.type_as(a))
torch.LongTensor
tensor([6, 3, 4, 2, 0, 5, 9, 1, 8, 7])
2. Tensor Property
- 每一个Tensor有torch.dtype、torch.device、torch.layout三种属性。
- device : 表示GPU的设备, 通常用 cuda:0, cuda:1, … 表示
- torch.device("cpu") 定义将 tensor 放到cpu中
- torch.device标识了torch.Tensor对象在创建之后所存储在的设备名称
- torch.layout表示torch.Tensor内存布局的对象。
2.1 稀疏张量的表示
- torch.sparse_coo_tensor
- indices : 坐标 => [[1,2,3], [0,0,0]] = (1,0) (2,0) (3,0)
- size : shape
- values : 稀疏矩阵值
import torch
# 定义将 tensor 放到cpu中
dev = torch.device("cpu")
# 定义将 tensor 放到Gpu中
# dev = torch.device("cuda")
a = torch.tensor([2, 2],
dtype=torch.float32,
device=dev)
print(a)
tensor([2., 2.])
i = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 3]])
v = torch.tensor([1, 2, 3, 4])
a = torch.sparse_coo_tensor(indices=i, values=v, size=(4, 4),
dtype=torch.float32,
device=dev)
print(a)
tensor(indices=tensor([[0, 1, 2, 3],
[0, 1, 2, 3]]),
values=tensor([1., 2., 3., 4.]),
size=(4, 4), nnz=4, layout=torch.sparse_coo)
2.2 稀疏矩阵转稠密矩阵
i = torch.tensor([[0, 1, 2], [0, 1, 2]])
v = torch.tensor([1, 2, 3])
a = torch.sparse_coo_tensor(i, v, (4, 4),
dtype=torch.float32,
device=dev)
print(a.type())
print(a)
# 转稠密矩阵
print(a.to_dense())
torch.sparse.FloatTensor
tensor(indices=tensor([[0, 1, 2],
[0, 1, 2]]),
values=tensor([1., 2., 3.]),
size=(4, 4), nnz=3, layout=torch.sparse_coo)
tensor([[1., 0., 0., 0.],
[0., 2., 0., 0.],
[0., 0., 3., 0.],
[0., 0., 0., 0.]])
3. Tensor operator
3.1 加法运算
- 加法运算直接返回结果
- a + b
- a.add(b)
- torch.add(a, b)
- 修改原先的值
- a.add_(b) : 会将a+b的结果直接赋值给a
a = torch.tensor([1,2,3])
b = torch.tensor([4,5,6])
print(a)
print(b)
tensor([1, 2, 3])
tensor([4, 5, 6])
print(a + b)
print(a.add(b))
print(torch.add(a, b))
print(a)
# 会修改a的值
print(a.add_(b))
print(a)
tensor([5, 7, 9])
tensor([5, 7, 9])
tensor([5, 7, 9])
tensor([1, 2, 3])
tensor([5, 7, 9])
tensor([5, 7, 9])
3.2 减法运算
- 直接返回结果
- a - b
- torch.sub(a, b)
- a.sub(b)
- 将返回结果赋值给变量
- a.sub_(b) : 会将a-b的结果直接赋值给a
a = torch.tensor([4,5,6])
b = torch.tensor([1,2,3])
print(a - b)
print(torch.sub(a, b))
print(a.sub(b))
print(a.sub_(b))
print(a)
tensor([3, 3, 3])
tensor([3, 3, 3])
tensor([3, 3, 3])
tensor([3, 3, 3])
tensor([3, 3, 3])
3.3 乘法运算
- 注意 : 对应的值乘以对应的值, 并不是矩阵运算
- 直接返回结果
- a * b
- torch.mul(a, b)
- a.mul(b)
- 将返回的值赋给变量
- a.mul_(b) 将结果直接赋值给a
a = torch.tensor([[1,1,1], [2,2,2], [3,3,3]])
b = torch.tensor([[1,1,1], [2,2,2], [3,3,3]])
print(a * b)
print(torch.mul(a, b))
print(a.mul(b))
print(a)
print(a.mul_(b))
print(a)
tensor([[1, 1, 1],
[4, 4, 4],
[9, 9, 9]])
tensor([[1, 1, 1],
[4, 4, 4],
[9, 9, 9]])
tensor([[1, 1, 1],
[4, 4, 4],
[9, 9, 9]])
tensor([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
tensor([[1, 1, 1],
[4, 4, 4],
[9, 9, 9]])
tensor([[1, 1, 1],
[4, 4, 4],
[9, 9, 9]])
3.4 除法运算
- 直接返回结果
- a/b
- torch.div(a, b)
- a.div(b)
- 将返回结果赋值给指定变量
- a.div_(b)
a = torch.tensor([[1,1,1], [2,2,2], [3,3,3]], dtype=torch.float32)
b = torch.tensor([[2,2,2], [2,2,2], [2,2,2]], dtype=torch.float32)
print(a/b)
print(torch.div(a, b))
print(a.div(b))
print(a.div_(b))
print(a)
tensor([[0.5000, 0.5000, 0.5000],
[1.0000, 1.0000, 1.0000],
[1.5000, 1.5000, 1.5000]])
tensor([[0.5000, 0.5000, 0.5000],
[1.0000, 1.0000, 1.0000],
[1.5000, 1.5000, 1.5000]])
tensor([[0.5000, 0.5000, 0.5000],
[1.0000, 1.0000, 1.0000],
[1.5000, 1.5000, 1.5000]])
tensor([[0.5000, 0.5000, 0.5000],
[1.0000, 1.0000, 1.0000],
[1.5000, 1.5000, 1.5000]])
tensor([[0.5000, 0.5000, 0.5000],
[1.0000, 1.0000, 1.0000],
[1.5000, 1.5000, 1.5000]])
3.5 矩阵运算
- 返回计算结果
- a @ b
- a.matmul(b)
- torch.matmul(a, b)
- torch.mm(a, b)
- a.mm(b)
a = torch.tensor([[1,1], [2,2]])
b = torch.tensor([[1,1], [2,2]])
# [1,1] * [1,2] = 3, [2,2] * [1,2] = 6
print(a @ b)
print(a.matmul(b))
print(torch.matmul(a, b))
print(torch.mm(a, b))
print(a.mm(b))
tensor([[3, 3],
[6, 6]])
tensor([[3, 3],
[6, 6]])
tensor([[3, 3],
[6, 6]])
tensor([[3, 3],
[6, 6]])
tensor([[3, 3],
[6, 6]])
3.6 高维tensor计算
# 四维向量
a = torch.ones(1, 2, 3, 4)
b = torch.ones(1, 2, 4, 3)
print(a.matmul(b).shape)
torch.Size([1, 2, 3, 3])
3.7 torch.pow
- pow(var, n) 计算var的n次方
- 直接返回结果
- torch.pow(a, 3)
- a.pow(3)
- a**3
- a.pow_(3)
- 直接返回结果, 然后将结果赋值给变量a
a = torch.tensor([1, 2])
print(torch.pow(a, 3))
print(a.pow(3))
print(a**3)
print(a.pow_(3))
print(a)
tensor([1, 8])
tensor([1, 8])
tensor([1, 8])
tensor([1, 8])
tensor([1, 8])
3.8 torch.exp
a = torch.tensor([1, 2],
dtype=torch.float32)
print(a.type())
print(torch.exp(a))
print(torch.exp_(a))
print(a.exp())
print(a.exp_())
torch.FloatTensor
tensor([2.7183, 7.3891])
tensor([2.7183, 7.3891])
tensor([ 15.1543, 1618.1781])
tensor([ 15.1543, 1618.1781])
3.9 torch.log
a = torch.tensor([10, 2],
dtype=torch.float32)
print(torch.log(a))
print(torch.log_(a))
print(a.log())
print(a.log_())
tensor([2.3026, 0.6931])
tensor([2.3026, 0.6931])
tensor([ 0.8340, -0.3665])
tensor([ 0.8340, -0.3665])
3.10 torch.sqrt
a = torch.tensor([10, 2],
dtype=torch.float32)
print(torch.sqrt(a))
print(torch.sqrt_(a))
print(a.sqrt())
print(a.sqrt_())
tensor([3.1623, 1.4142])
tensor([3.1623, 1.4142])
tensor([1.7783, 1.1892])
tensor([1.7783, 1.1892])