Course 2 - Improving Deep Neural Networks - Week 1 Assignments (1&2&3) - 1.3 Gradient Checking
1. Gradient checking: verifying that the derivatives computed by backpropagation are correct
1.1 The utility module (gc_utils.py)
# -*- coding: utf-8 -*-
import numpy as np

def sigmoid(x):
    """
    Compute the sigmoid of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x)
    """
    s = 1 / (1 + np.exp(-x))
    return s

def relu(x):
    """
    Compute the relu of x

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x)
    """
    s = np.maximum(0, x)
    return s

def dictionary_to_vector(parameters):
    """
    Roll all our parameters dictionary into a single vector satisfying our specific required shape.
    """
    keys = []
    count = 0
    for key in ["W1", "b1", "W2", "b2", "W3", "b3"]:
        # flatten parameter
        new_vector = np.reshape(parameters[key], (-1, 1))
        keys = keys + [key] * new_vector.shape[0]
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1
    return theta, keys

def vector_to_dictionary(theta):
    """
    Unroll all our parameters dictionary from a single vector satisfying our specific required shape.
    """
    parameters = {}
    parameters["W1"] = theta[:20].reshape((5, 4))
    parameters["b1"] = theta[20:25].reshape((5, 1))
    parameters["W2"] = theta[25:40].reshape((3, 5))
    parameters["b2"] = theta[40:43].reshape((3, 1))
    parameters["W3"] = theta[43:46].reshape((1, 3))
    parameters["b3"] = theta[46:47].reshape((1, 1))
    return parameters

def gradients_to_vector(gradients):
    """
    Roll all our gradients dictionary into a single vector satisfying our specific required shape.
    """
    count = 0
    for key in ["dW1", "db1", "dW2", "db2", "dW3", "db3"]:
        # flatten gradient
        new_vector = np.reshape(gradients[key], (-1, 1))
        if count == 0:
            theta = new_vector
        else:
            theta = np.concatenate((theta, new_vector), axis=0)
        count = count + 1
    return theta
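As a quick sanity check (not part of the original assignment), the two vector helpers can be verified with a round trip. This sketch assumes the file above is saved as gc_utils.py; the all-ones and all-zeros values are arbitrary placeholders:

import numpy as np
import gc_utils

params = {"W1": np.ones((5, 4)), "b1": np.zeros((5, 1)),
          "W2": np.ones((3, 5)), "b2": np.zeros((3, 1)),
          "W3": np.ones((1, 3)), "b3": np.zeros((1, 1))}
theta, keys = gc_utils.dictionary_to_vector(params)
print(theta.shape)  # (47, 1): 20 + 5 + 15 + 3 + 3 + 1 entries
restored = gc_utils.vector_to_dictionary(theta)
print(all(np.allclose(params[k], restored[k]) for k in params))  # True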
We first implement gradient checking for a one-dimensional linear function, then for a three-layer neural network. Note that this checks the gradients of a single iteration only.
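Both checks compute the same two quantities: a two-sided (centered) approximation of each derivative, and the relative distance between that approximation and the gradient from backpropagation:

$$\mathrm{gradapprox} = \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2\varepsilon}, \qquad \mathrm{difference} = \frac{\lVert \mathrm{grad} - \mathrm{gradapprox} \rVert_2}{\lVert \mathrm{grad} \rVert_2 + \lVert \mathrm{gradapprox} \rVert_2}$$

A difference below 1e-7 is taken to mean the backpropagated gradient is correct.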
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import numpy as np
import gc_utils

# 1. Gradient checking for a one-dimensional linear function
# 1.1 Forward propagation in the 1D linear case
def forward_propagation(x, theta):
    J = np.dot(theta, x)
    return J

# 1.2 Backward propagation in the 1D linear case (dJ/dtheta = x, since J = theta * x)
def backward_propagation(x, theta):
    dtheta = x
    return dtheta

# 1.3 The gradient-checking function
def gradient_check(x, theta, epsilon=1e-7):
    # Approximate the derivative with a two-sided difference
    thetaplus = theta + epsilon
    thetaminus = theta - epsilon
    J_plus = forward_propagation(x, thetaplus)
    J_minus = forward_propagation(x, thetaminus)
    gradapprox = (J_plus - J_minus) / (2 * epsilon)

    # Check whether gradapprox is close enough to the output of backward_propagation()
    grad = backward_propagation(x, theta)
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference < 1e-7:
        print("Gradient check: the gradient is correct")
    else:
        print("Gradient check: the gradient exceeds the threshold")
    return difference
# 1.4 Tests
# Test forward_propagation
# print("-----------------Testing forward_propagation-----------------")
# x, theta = 2, 4
# J = forward_propagation(x, theta)
# print("J = " + str(J))

# Test backward_propagation
# print("-----------------Testing backward_propagation-----------------")
# x, theta = 2, 4
# dtheta = backward_propagation(x, theta)
# print("dtheta = " + str(dtheta))

# Test gradient_check
# print("-----------------Testing gradient_check-----------------")
# x, theta = 2, 4
# difference = gradient_check(x, theta)
# print("difference = " + str(difference))
# =================================== 2. N-dimensional gradient checking (3-layer network) ===================================
# 2.1 Forward propagation for the 3-layer network
def forward_propagation_n(X, Y, parameters):
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = gc_utils.relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = gc_utils.relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = gc_utils.sigmoid(Z3)

    # Compute the cross-entropy cost
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = (1 / m) * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
# 2.2 Backward propagation for the 3-layer network
def backward_propagation_n(X, Y, cache):
    """
    Implements the backward propagation for the network defined above.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true labels
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to
                 each parameter, activation and pre-activation variable.
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = (1. / m) * np.dot(dZ3, A2.T)
    db3 = (1. / m) * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))   # relu'(Z2) is 1 where A2 > 0, else 0
    dW2 = (1. / m) * np.dot(dZ2, A1.T)
    db2 = (1. / m) * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))   # relu'(Z1) is 1 where A1 > 0, else 0
    dW1 = (1. / m) * np.dot(dZ1, X.T)
    db1 = (1. / m) * np.sum(dZ1, axis=1, keepdims=True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients
# 2.3 Gradient checking in the n-dimensional case
def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
    # Flatten the parameters and gradients into vectors
    parameters_values, keys = gc_utils.dictionary_to_vector(parameters)
    grad = gc_utils.gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Compute gradapprox, perturbing one parameter at a time
    for i in range(num_parameters):
        thetaplus = np.copy(parameters_values)
        thetaplus[i][0] = thetaplus[i][0] + epsilon
        J_plus[i], cache = forward_propagation_n(X, Y, gc_utils.vector_to_dictionary(thetaplus))

        thetaminus = np.copy(parameters_values)
        thetaminus[i][0] = thetaminus[i][0] - epsilon
        J_minus[i], cache = forward_propagation_n(X, Y, gc_utils.vector_to_dictionary(thetaminus))

        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)

    # Compare gradapprox with the backpropagated gradient via the relative difference
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference < 1e-7:
        print("Gradient check: the gradient is correct: " + str(difference))
    else:
        print("Gradient check: the difference exceeds the threshold: " + str(difference))
    return difference
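# Note: the loop above calls forward_propagation_n twice per parameter (here,
# 2 x 47 = 94 forward passes), so gradient checking is far too slow to run
# during training. Use it only occasionally to debug backward propagation,
# and disable dropout first, since dropout makes the cost J non-deterministic.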
def gradient_check_n_test_case():
    np.random.seed(1)
    x = np.random.randn(4, 3)
    y = np.array([1, 1, 0])
    W1 = np.random.randn(5, 4)
    b1 = np.random.randn(5, 1)
    W2 = np.random.randn(3, 5)
    b2 = np.random.randn(3, 1)
    W3 = np.random.randn(1, 3)
    b3 = np.random.randn(1, 1)
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}
    return x, y, parameters
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y, epsilon=1e-6)
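To confirm that the checker actually catches mistakes, one can re-run it with a deliberately corrupted gradient. A minimal sketch (doubling dW2 is an arbitrary, simulated bug, not part of the assignment):

bad_gradients = dict(gradients)
bad_gradients["dW2"] = gradients["dW2"] * 2  # simulated backprop bug
gradient_check_n(parameters, bad_gradients, X, Y, epsilon=1e-6)  # difference now far above 1e-7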