One-layer backward pass for a feedforward neural network.
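For reference, these are the standard dense-layer backprop formulas the code below computes, written in assumed notation (pre-activation $Z = W A_{\text{prev}} + b$, activation derivative $g'$, $m$ samples along the column axis, $dA$ the gradient arriving from the next layer):

$dZ = dA \odot g'(Z)$
$dW = \frac{1}{m}\, dZ\, A_{\text{prev}}^{\top}$
$db = \frac{1}{m} \sum_{i=1}^{m} dZ^{(i)}$ (sum over the sample axis)
$dA_{\text{prev}} = W^{\top} dZ$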
import numpy as np

import activations  # companion module from the gist, providing d_relu and d_sigmoid


def one_layer_backward_pass(curr_grad, curr_weight, curr_bias, curr_out,
                            prev_act, activation='R'):
    # number of samples in the previous layer's activations
    num = prev_act.shape[1]

    # pick the derivative of the chosen activation function
    if activation == 'R':
        d_act_func = activations.d_relu
    elif activation == 'S':
        d_act_func = activations.d_sigmoid
    else:
        raise ValueError('Unsupported activation: ' + str(activation))

    # gradient through the activation function
    d_curr_out = d_act_func(curr_grad, curr_out)
    # gradient of the weight matrix, averaged over the samples
    d_curr_weight = np.dot(d_curr_out, prev_act.T) / num
    # gradient of the bias vector, averaged over the samples
    d_curr_bias = np.sum(d_curr_out, axis=1, keepdims=True) / num
    # gradient of the input activations from the previous layer
    d_prev_act = np.dot(curr_weight.T, d_curr_out)

    return d_prev_act, d_curr_weight, d_curr_bias
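A minimal usage sketch, assuming the function above is in scope together with an activations module whose d_relu(grad, out) and d_sigmoid(grad, out) take the incoming gradient and the layer's pre-activation output (an assumption consistent with how they are called here). The array shapes and random inputs are illustrative, following the features-by-samples convention the code implies:

import numpy as np

rng = np.random.default_rng(0)
prev_act = rng.standard_normal((4, 8))         # 4 features, 8 samples
curr_weight = rng.standard_normal((3, 4))      # layer maps 4 units -> 3 units
curr_bias = rng.standard_normal((3, 1))
curr_out = curr_weight @ prev_act + curr_bias  # pre-activation output of this layer
curr_grad = rng.standard_normal((3, 8))        # gradient arriving from the next layer

d_prev_act, d_w, d_b = one_layer_backward_pass(
    curr_grad, curr_weight, curr_bias, curr_out, prev_act, activation='R')

print(d_prev_act.shape, d_w.shape, d_b.shape)  # (4, 8) (3, 4) (3, 1)

The returned d_prev_act becomes the curr_grad argument for the next-earlier layer, which is how the function chains into a full backward pass.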