def linear_activation_forward(W, A_prev, b, activation):
    """
    Implement forward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from the previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of current layer, 1)
    activation -- the activation to use in this layer: 'sigmoid' or 'relu'

    Returns:
    A -- the post-activation value, passed on as input to the next layer
    cache -- a python tuple containing (linear_cache, activation_cache); stored for computing the backward pass efficiently
    """
    if activation == 'relu':
        Z, linear_cache = linear_forward(W, A_prev, b)
        A, activation_cache = relu(Z)
    elif activation == 'sigmoid':
        Z, linear_cache = linear_forward(W, A_prev, b)
        A, activation_cache = sigmoid(Z)
    cache = (linear_cache, activation_cache)
    return A, cache
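
# The helpers called above (linear_forward, sigmoid, relu) are not defined in this
# gist; they come from earlier helper code. The definitions below are a minimal
# sketch, assumed to match the call signatures and cache conventions used here
# (linear_cache = (A_prev, W, b), activation_cache = Z), so the snippet can be run
# end to end.
import numpy as np

def linear_forward(W, A_prev, b):
    # Z = W . A_prev + b; keep the inputs in the cache for the backward pass
    Z = np.dot(W, A_prev) + b
    linear_cache = (A_prev, W, b)
    return Z, linear_cache

def sigmoid(Z):
    # element-wise sigmoid; keep Z as the activation cache for the backward pass
    A = 1 / (1 + np.exp(-Z))
    return A, Z

def relu(Z):
    # element-wise relu; keep Z as the activation cache for the backward pass
    A = np.maximum(0, Z)
    return A, Z
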
def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation.

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- python dictionary containing the weights 'W1', ..., 'WL' and biases 'b1', ..., 'bL' of the network

    Returns:
    AL -- the last post-activation value (output of the sigmoid layer)
    caches -- list of caches containing:
              every cache of linear_activation_forward() (there are L of them, indexed from 0 to L-1)
    """
    caches = []
    A = X                     # the activations of layer 0 are the input data
    L = len(parameters) // 2  # parameters holds both W and b for each layer, so divide by two

    # Only the output layer requires the sigmoid function; layers 1 to L-1 use the relu function
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(parameters['W' + str(l)], A_prev, parameters['b' + str(l)], activation='relu')
        caches.append(cache)

    # the output layer, which requires the sigmoid function
    AL, cache = linear_activation_forward(parameters['W' + str(L)], A, parameters['b' + str(L)], activation='sigmoid')
    caches.append(cache)

    return AL, caches
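
# Hypothetical usage example (not part of the original gist): build a small
# 2-layer network with randomly initialised parameters, run a forward pass on a
# toy batch, and check the output shapes. Assumes the helper sketches above;
# the layer sizes and data are made up for illustration.
if __name__ == '__main__':
    np.random.seed(1)
    layers_dims = [4, 3, 1]  # input size 4, one hidden layer of 3 units, 1 output unit
    parameters = {}
    for l in range(1, len(layers_dims)):
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))

    X = np.random.randn(4, 5)  # 5 examples, each with 4 features
    AL, caches = L_model_forward(X, parameters)
    print(AL.shape)    # (1, 5): one sigmoid output per example
    print(len(caches)) # 2: one cache per layer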