import torch
import numpy as np
import matplotlib.pyplot as plt
# =========================== example 1-1 =============================
# Create a tensor with torch.tensor() from a numpy array
flag = False
if flag:
    arr = np.ones((3, 3))
    print('数据类型:', arr.dtype)
    # device='cuda' allocates the tensor on the GPU (requires CUDA).
    t = torch.tensor(arr, device='cuda')
    print(t)
# =========================== example 1-2 =============================
# torch.from_numpy(): the tensor SHARES memory with the source ndarray
flag = False
if flag:
    arr = np.array([[1, 2], [3, 4]])
    print('数据类型:', arr.dtype)
    t = torch.from_numpy(arr)
    print('数据类型:', t.dtype)
    # Mutating the tensor also mutates the ndarray (shared buffer).
    print('修改tensor,共享内存实验')
    t[0, 0] = 1111
    print(arr)
    print(t)
# =========================== example 1-3 =============================
# Create a tensor directly with a torch factory function
flag = False
# flag = True
if flag:
    arr = torch.tensor([[1, 2], [3, 4]])
    # out=arr writes the result into `arr`, so t and arr are the SAME object.
    t = torch.zeros((2, 2), out=arr)
    print(t)
    print(arr)
    print(id(arr), id(t), id(arr) == id(t))
# =========================== example 1-4 =============================
# torch.full: create a tensor where every element has the same value
flag = False
# flag = True
if flag:
    arr = torch.full((3, 3), 10)
    print(arr)
# =========================== example 1-5 =============================
# torch.arange: create an arithmetic-progression tensor
flag = False
# flag = True
if flag:
    # arange(1, 12, 2) yields 6 elements (1,3,5,7,9,11); use reshape
    # instead of the deprecated Tensor.resize() to view them as 2x3.
    arr = torch.arange(start=1, end=12, step=2).reshape(2, 3)
    print(arr)
# =========================== example 1-6 =============================
# torch.linspace: create `steps` evenly spaced values over [start, end]
flag = False
# flag = True
if flag:
    # 10 evenly spaced values; reshape (not the deprecated Tensor.resize())
    # to view them as 2x5.
    arr = torch.linspace(start=1, end=12, steps=10).reshape(2, 5)
    print(arr)
# =========================== example 1-7 =============================
# torch.eye: create a 2-D identity-style matrix (n rows, m columns)
flag = False
# flag = True
if flag:
    arr = torch.eye(n=2, m=3)
    print(arr)
# =========================== example 1-8 =============================
# torch.normal: draw samples from Gaussian distributions
flag = False
# flag = True
if flag:
    # 1. mean = tensor, std = tensor: one sample per (mean[i], std[i]) pair
    mean = torch.arange(1, 5, dtype=torch.float)
    std = torch.arange(1, 5, dtype=torch.float)
    arr = torch.normal(mean=mean, std=std)
    print('mean:{} \n std:{}'.format(mean, std))
    print(arr)
    # 2. mean = scalar, std = scalar: size= gives the sample shape
    arr = torch.normal(mean=0.0, std=1.0, size=(4,))
    print(arr)
    # 3. mean = tensor, std = scalar
    arr = torch.normal(mean=mean, std=1)
    print(arr)
    # 4. mean = scalar, std = tensor
    arr = torch.normal(mean=0, std=std)
    print(arr)
# =========================== example 1-9 =============================
# torch.rand() creates a uniform random tensor; size= gives the shape
# Related: torch.rand_like()
# torch.randint()  args: low (inclusive), high (exclusive)
# torch.randint_like()
# torch.randperm() arg: n, the length of the random permutation
# torch.bernoulli() arg: input, the per-element probabilities
# =========================== example 2-1 =============================
# torch.cat: concatenate tensors along an EXISTING dimension
flag = False
# flag = True
if flag:
    t = torch.ones((2, 3))
    t_0 = torch.cat([t, t], dim=0)  # along dim 0 -> shape (4, 3)
    t_1 = torch.cat([t, t], dim=1)  # along dim 1 -> shape (2, 6)
    print('t_0:{} shape:{}\nt_1:{} shape:{}'.format(t_0,
                                                    t_0.shape, t_1, t_1.shape))
# =========================== example 2-2 =============================
# torch.stack: stack tensors along a NEW dimension
flag = False
# flag = True
if flag:
    t_0 = torch.ones((2, 3))
    # dim=0 inserts a new leading axis -> shape (4, 2, 3)
    t_1 = torch.stack([t_0, t_0, t_0, t_0], dim=0)
    print('t_0:{} shape:{}\nt_1:{} shape:{}'.format(t_0,
                                                    t_0.shape, t_1, t_1.shape))
# =========================== example 2-3 =============================
# torch.chunk: split a tensor into `chunks` roughly equal parts along dim
# args: input  tensor to split; chunks  number of parts; dim  axis to split on
flag = False
# flag = True
if flag:
    t_0 = torch.ones((2, 5))
    # 5 is not divisible by 2, so the pieces are (2, 3) and (2, 2)
    t_1 = torch.chunk(t_0, chunks=2, dim=1)
    for idx, t in enumerate(t_1):
        print('第{}个张量:{},shape is {}'.format(idx + 1, t, t.shape))
# =========================== example 2-4 =============================
# torch.split: split along dim; split_size_or_sections is either an int
# (the length of each piece) or a list (explicit per-piece lengths)
flag = False
# flag = True
if flag:
    t_0 = torch.ones((2, 5))
    t_1 = torch.split(t_0, split_size_or_sections=2, dim=1)
    for idx, t in enumerate(t_1):
        print('第{}个张量:{},shape is {}'.format(idx + 1, t, t.shape))
    # split_size_or_sections=[2, 2, 1] yields the same pieces here
# =========================== example 2-5 =============================
# torch.index_select: gather slices of `input` along dim by integer `index`
# args: input  tensor to index; dim  axis; index  1-D int64 tensor of indices
flag = False
# flag = True
if flag:
    t = torch.randint(0, 9, size=(3, 3))
    idx = torch.tensor([0, 2], dtype=torch.long)  # index must be int64
    t_select = torch.index_select(t, dim=0, index=idx)
    print('t:\n{}\nt_select:\n{}'.format(t, t_select))
# =========================== example 2-6 =============================
# torch.masked_select: keep elements where a boolean mask is True;
# mask has the same shape as input, and the result is always 1-D
flag = False
# flag = True
if flag:
    t = torch.randint(0, 9, size=(3, 3))
    mask = t.ge(5)  # elementwise t >= 5, boolean tensor
    t_select = torch.masked_select(t, mask)
    print('t:\n{}\nmask:\n{}\nt_select:\n{}'.format(t, mask, t_select))
# =========================== example 2-7 =============================
# torch.reshape: view the same data under a new shape
# args: input  tensor to reshape; shape  the target shape
flag = False
# flag = True
if flag:
    t = torch.randint(0, 10, size=(2, 5))
    t_new = torch.reshape(t, (5, 2))
    print('t:\n{}\nt_shape:\n{}\nt_new:\n{}\nt_new_shape{}'.format(t, t.shape, t_new, t_new.shape))
# =========================== example 2-8 =============================
# torch.transpose: swap two dimensions of a tensor
# args: input  tensor; dim0, dim1  the pair of axes to swap
# torch.t() is the shorthand for 2-D tensors
flag = False
# flag = True
if flag:
    t = torch.randint(0, 10, size=(2, 5))
    t_new = torch.transpose(t, 0, 1)
    print('t:\n{}\nt_shape:\n{}\nt_new:\n{}\nt_new_shape{}'.format(t, t.shape, t_new, t_new.shape))
# =========================== example 2-9 =============================
# torch.squeeze: remove dimensions of length 1
# dim=None removes every length-1 axis; a given dim is removed only when
# its length is 1. The inverse is torch.unsqueeze (insert a new axis).
flag = False
# flag = True
if flag:
    t = torch.rand((1, 2, 3, 1))
    print(t.shape)
    t_sq = torch.squeeze(t)        # all length-1 axes dropped -> (2, 3)
    print(t_sq.shape)
    t_0 = torch.squeeze(t, dim=0)  # dim 0 has length 1 -> (2, 3, 1)
    print(t_0.shape)
    t_1 = torch.squeeze(t, dim=1)  # dim 1 has length 2 -> unchanged
    print(t_1.shape)
# =========================== example 2-10 =============================
# Tensor arithmetic: add / sub / mul / div and the fused variants
# torch.add()      torch.addcdiv()  torch.addcmul()
# torch.sub()      torch.div()      torch.mul()
flag = False
# flag = True
if flag:
    t_0 = torch.randn((3, 3))
    t_1 = torch.ones_like(t_0)
    # BUG FIX: the legacy positional form torch.add(input, value, other)
    # was removed; the scale factor is now the keyword `alpha`:
    # t_add = t_0 + 10 * t_1
    t_add = torch.add(t_0, t_1, alpha=10)
    print('t_0:\n{}\nt_1:{}\nt_add_10:\n{}'.format(t_0, t_1, t_add))
# =========================== example 3-1 =============================
# Linear regression fitted by plain gradient descent, visualized live
torch.manual_seed(10)
lr = 0.1  # learning rate
# synthetic data: y = 2x + 5 + gaussian noise
x = torch.rand(20, 1) * 10
y = 2 * x + (5 + torch.randn(20, 1))
# trainable parameters: slope w and intercept b
w = torch.randn((1), requires_grad=True)
b = torch.zeros((1), requires_grad=True)
for iteration in range(1000):
    # forward pass: y_pred = w * x + b
    wx = torch.mul(w, x)
    y_pred = torch.add(wx, b)
    # MSE loss (the 0.5 factor keeps the gradient simple)
    loss = (0.5 * (y - y_pred) ** 2).mean()
    # backward pass
    loss.backward()
    # gradient-descent update on the raw data (bypasses autograd)
    b.data.sub_(lr * b.grad)
    w.data.sub_(lr * w.grad)
    # BUG FIX: .backward() ACCUMULATES into .grad, so the gradients must
    # be zeroed every iteration or the updates compound and diverge.
    w.grad.zero_()
    b.grad.zero_()
    if iteration % 20 == 0:
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), y_pred.data.numpy(), 'r-', lw=5)
        plt.text(2, 20, 'loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.xlim(1.5, 10)
        plt.ylim(8, 28)
        plt.title('Iteration:{}\n w:{} b:{}'.format(iteration, w.data.numpy(), b.data.numpy()))
        plt.pause(0.5)  # brief pause so the figure has time to render
        if loss.data.numpy() < 1:
            break