# 1.2 Autograd (Automatic Differentiation)

```python
import torch
```

```python
# Create a tensor and track operations on it for gradient computation
x = torch.ones(2, 2, requires_grad=True)
print(x)
```

```
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
```

```python
y = x + 2
y
```

```
tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)
```
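
Because `y` was produced by an operation, it carries a `grad_fn`; tensors created directly by the user are graph leaves and have none. A small check using the `x` and `y` above:

```python
# Leaf tensors created by the user have no grad_fn; tensors
# produced by operations record the function that made them.
print(x.is_leaf, x.grad_fn)  # True None
print(y.is_leaf, y.grad_fn)  # False <AddBackward0 object at ...>
```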

```python
help(y.grad_fn)
```

```
Help on AddBackward0 object:

class AddBackward0(object)
 |  Methods defined here:
 |  
 |  __call__(self, /, *args, **kwargs)
 |      Call self as a function.
 |  
 |  name(...)
 |  
 |  register_hook(...)
 |  
 |  ----------------------------------------------------------------------
 |  Data descriptors defined here:
 |  
 |  metadata
 |  
 |  next_functions
 |  
 |  requires_grad
```
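
The `next_functions` attribute seen above links each node to the `grad_fn` of its inputs, which is how autograd walks the graph backwards. A peek, reusing the `y` from before (addresses will vary):

```python
# next_functions points back to the functions that produced this
# node's inputs; AccumulateGrad marks the leaf tensor x, and None
# stands for the constant 2, which needs no gradient.
print(y.grad_fn.next_functions)
# ((<AccumulateGrad object at 0x...>, 0), (None, 0))
```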

```python
z = y * y * 3   # z = 3 * (x + 2)^2
out = z.mean()  # out = sum(3 * (x + 2)^2) / 4
z, out
```

```
(tensor([[27., 27.],
         [27., 27.]], grad_fn=<MulBackward0>),
 tensor(27., grad_fn=<MeanBackward0>))
```

```python
a = torch.randn(2, 2)
a = (a * 3) / (a - 1)
# requires_grad defaults to False
print(a.requires_grad)
# requires_grad_() enables gradient tracking in place
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
```

```
False
True
<SumBackward0 object at 0x7fd230a89d90>
```
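
As an aside, the trailing underscore in `requires_grad_` marks an in-place method; the same effect can be achieved at creation time. A minimal sketch:

```python
# Two equivalent ways to obtain a tracked tensor: set the flag
# at creation, or flip it in place afterwards.
c = torch.randn(2, 2, requires_grad=True)
d = torch.randn(2, 2)
d.requires_grad_(True)
print(c.requires_grad, d.requires_grad)  # True True
```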

```python
# Compute the partial derivatives of out with respect to x at x = 1
out.backward()
x.grad
```

```
tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])
```
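
The 4.5 can be checked by hand: out = (1/4) * sum(3 * (x_i + 2)^2), so d(out)/dx_i = (3/2) * (x_i + 2), which equals 4.5 at x_i = 1. A quick numerical cross-check:

```python
# Analytic gradient: d(out)/dx_i = 1.5 * (x_i + 2) = 4.5 at x_i = 1
analytic = 1.5 * (torch.ones(2, 2) + 2)
print(torch.allclose(x.grad, analytic))  # True
```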

```python
x = torch.randn(3, requires_grad=True)

y = x * 2
# norm() computes the L2 norm: the square root of the sum of squares
while y.data.norm() < 1000:
    y = y * 2
print(y)
```

```
tensor([ 277.2757,    3.3198, -986.0908], grad_fn=<MulBackward0>)
```
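
Using `.data` to step outside autograd is an older idiom; `.detach()` (or a `torch.no_grad()` block) is the currently recommended way to read values without tracking them. A sketch of an equivalent loop:

```python
# detach() reads the current value without recording the norm
# computation in the graph, same intent as y.data.norm().
while y.detach().norm() < 1000:
    y = y * 2
```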

```python
v = torch.tensor([0.1, 1.0, 1.0], dtype=torch.float)
# Pass the vector v to backward()
y.backward(v)
# x.grad now holds the vector-Jacobian product v^T * J
x.grad
# Each entry below is v_i * 2^11 = v_i * 2048
```

```
tensor([ 204.8000, 2048.0000, 2048.0000])
```
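
Because `y` is not a scalar, `backward()` needs a vector argument and computes the vector-Jacobian product v^T * J rather than the full Jacobian. Here the mapping is simply y = 2^11 * x, so the Jacobian is 2048 * I and x.grad = 2048 * v, matching the output above. A sketch that verifies the scale factor:

```python
# y = 2^k * x element-wise; recover the factor and confirm
# that x.grad equals that factor times v.
scale = (y / x).detach()                  # each entry is 2048.0
print(torch.allclose(x.grad, v * scale))  # True
```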

```python
print(x.requires_grad)
print((x ** 2).requires_grad)

# Disable gradient tracking inside this block
with torch.no_grad():
    print((x ** 2).requires_grad)
```

```
True
True
False
```
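
A `torch.no_grad()` block is the usual wrapper for inference or any computation that should not be recorded. A minimal sketch:

```python
# Inside no_grad, no graph is built: the result has no grad_fn,
# so it cannot participate in a later backward pass. This saves
# memory during evaluation.
with torch.no_grad():
    z = x * 2
print(z.requires_grad, z.grad_fn)  # False None
```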

```python
print(x.requires_grad)
# detach() returns a tensor with the same values but no gradient tracking
y = x.detach()
print(y.requires_grad)
print(x.eq(y))
```

```
True
False
tensor([True, True, True])
```
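
One caveat: `detach()` shares the underlying storage with the original tensor, so in-place writes through one are visible in the other. A small sketch:

```python
# y shares memory with x: writing through the detached view
# also changes x's data.
y[0] = 100.0
print(x[0])  # tensor(100., grad_fn=<SelectBackward0>)
```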

