conv 연산을 위한 zero padding:
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
    """
    Zero-pad every image of the batch X on its height and width dimensions.

    The batch (axis 0) and channel (axis 3) dimensions are left untouched;
    `pad` rows/columns of zeros are added on each side of axes 1 and 2.

    Arguments:
    X -- numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
    pad -- integer, amount of padding around each image on vertical and horizontal dimensions

    Returns:
    X_pad -- padded batch of shape (m, n_H + 2 * pad, n_W + 2 * pad, n_C)
    """
    # One (before, after) pair per axis: no padding on batch/channel axes.
    pad_widths = ((0, 0), (pad, pad), (pad, pad), (0, 0))
    return np.pad(X, pad_widths, mode='constant', constant_values=0)
np.pad에 대한 이해가 필요하다.
어렵다.
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
    """
    Implements the forward propagation for a convolution function.

    Arguments:
    A_prev -- output activations of the previous layer,
        numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
    W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
    b -- Biases, numpy array of shape (1, 1, 1, n_C)
    hparameters -- python dictionary containing "stride" and "pad"

    Returns:
    Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
    cache -- (A_prev, W, b, hparameters), needed by conv_backward()
    """
    # Retrieve dimensions from A_prev's and W's shapes.
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    (f, f, n_C_prev, n_C) = W.shape

    # Retrieve hyperparameters.
    stride = hparameters['stride']
    pad = hparameters['pad']

    # Output spatial size: floor((n_prev + 2*pad - f) / stride) + 1
    # (int() applies the floor since all operands are non-negative).
    n_H = int((n_H_prev + (2 * pad) - f) / stride) + 1
    n_W = int((n_W_prev + (2 * pad) - f) / stride) + 1

    # Initialize the output volume and pad the input batch.
    Z = np.zeros((m, n_H, n_W, n_C))
    A_prev_pad = zero_pad(A_prev, pad)

    for i in range(m):                       # loop over the batch of training examples
        a_prev_pad = A_prev_pad[i]           # ith example's padded activation
        for h in range(n_H):                 # vertical axis of the output volume
            vert_start = h * stride
            vert_end = vert_start + f
            for w in range(n_W):             # horizontal axis of the output volume
                horiz_start = w * stride
                horiz_end = horiz_start + f
                for c in range(n_C):         # channels (= #filters) of the output volume
                    # 3D slice of the padded input under filter c's current window.
                    a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    # Convolve the slice with filter c's weights and bias
                    # to produce a single output neuron.
                    weights = W[:, :, :, c]
                    biases = b[:, :, :, c]
                    Z[i, h, w, c] = conv_single_step(a_slice_prev, weights, biases)

    # Save information in "cache" for the backprop.
    cache = (A_prev, W, b, hparameters)
    return Z, cache
'진로 > 구글 머신러닝 부트캠프' 카테고리의 다른 글

| 제목 | 댓글 | 날짜 |
|---|---|---|
| 2022 구글 머신러닝 부트캠프 6주차 이야기 | (0) | 2022.08.06 |
| 2022 구글 머신러닝 부트캠프 5주차 이야기 | (0) | 2022.08.01 |
| 2022 구글 머신러닝 부트캠프 4주차 이야기 | (0) | 2022.07.23 |
| 2022 구글 머신러닝 부트캠프 3주차 이야기 | (0) | 2022.07.17 |
| 2022 구글 머신러닝 부트캠프 2주차 이야기 | (0) | 2022.07.11 |