```python
import numpy as np


class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights from a normal distribution with standard
        # deviation 1/sqrt(n), where n is the number of incoming connections.
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation for the hidden layer. The template also allows
        # a lambda expression instead, e.g.:
        # self.activation_function = lambda x: 1 / (1 + np.exp(-x))
        def sigmoid(x):
            return 1 / (1 + np.exp(-x))
        self.activation_function = sigmoid

    def train(self, features, targets):
        ''' Train the network on a batch of features and targets.

            Arguments
            ---------
            features: 2D array, each row is one data record, each column is a feature
            targets: 1D array of target values
        '''
        n_records = features.shape[0]
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            # Forward pass, then accumulate weight steps via backpropagation.
            final_outputs, hidden_outputs = self.forward_pass_train(X)
            delta_weights_i_h, delta_weights_h_o = self.backpropagation(
                final_outputs, hidden_outputs, X, y,
                delta_weights_i_h, delta_weights_h_o)
        self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)

    def forward_pass_train(self, X):
        ''' Run the forward pass for training.

            Arguments
            ---------
            X: features batch
        '''
        ### Forward pass ###
        hidden_inputs = np.dot(X, self.weights_input_to_hidden)   # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)  # signals from hidden layer

        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)  # signals into final output layer
        final_outputs = final_inputs  # identity activation: signals from final output layer

        return final_outputs, hidden_outputs

    def backpropagation(self, final_outputs, hidden_outputs, X, y,
                        delta_weights_i_h, delta_weights_h_o):
        ''' Run backpropagation and accumulate the weight steps.

            Arguments
            ---------
            final_outputs: output from forward pass
            hidden_outputs: hidden-layer output from forward pass
            X: features batch
            y: target (i.e. label) batch
            delta_weights_i_h: change in weights from input to hidden layers
            delta_weights_h_o: change in weights from hidden to output layers
        '''
        ### Backward pass ###
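        # Note on the gradient (my annotation, not part of the original
        # template): the loss is squared error E = (y - y_hat)^2 / 2 and the
        # output activation f(x) = x is the identity, so f'(x) = 1 and the
        # output error term is simply error * 1.0. Had the output used a
        # sigmoid, it would instead be error * final_outputs *
        # (1 - final_outputs), as in the commented line below.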
        # Output layer error is the difference between desired target and actual output.
        error = y - final_outputs

        # Hidden layer's contribution to the error.
        hidden_error = np.dot(error, self.weights_hidden_to_output.T)

        # Backpropagated error terms.
        output_error_term = error * 1.0
        # output_error_term = error * final_outputs * (1 - final_outputs)  # if the output were sigmoid
        hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)

        # Weight step (input to hidden)
        delta_weights_i_h += hidden_error_term * X[:, None]
        # Weight step (hidden to output)
        delta_weights_h_o += output_error_term * hidden_outputs[:, None]
        return delta_weights_i_h, delta_weights_h_o

    def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
        ''' Update weights on gradient descent step

            Arguments
            ---------
            delta_weights_i_h: change in weights from input to hidden layers
            delta_weights_h_o: change in weights from hidden to output layers
            n_records: number of records
        '''
        # Average the accumulated weight steps over the batch, scale by the
        # learning rate, and apply the gradient descent step.
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records  # update hidden-to-output weights
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records   # update input-to-hidden weights

    def run(self, features):
        ''' Run a forward pass through the network with input features

            Arguments
            ---------
            features: 1D array of feature values
        '''
        hidden_inputs = np.dot(features, self.weights_input_to_hidden)   # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)         # signals from hidden layer

        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)  # signals into final output layer
        final_outputs = final_inputs  # identity activation: signals from final output layer

        return final_outputs


#########################################################
# Set your hyperparameters here
#########################################################
iterations = 3000
learning_rate = 0.65
hidden_nodes = 14
output_nodes = 1
```
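To sanity-check the class outside the project notebook, a quick smoke test on synthetic data can be run. The toy target below (the mean of three random features), the node counts, and the iteration count are illustrative choices, not part of the project:

```python
import numpy as np

# Assumes the NeuralNetwork class above is in scope (e.g. imported from
# my_answers.py). The data here is synthetic, purely a shape/learning check.
rng = np.random.RandomState(42)
features = rng.rand(200, 3)      # 200 records, 3 features each
targets = features.mean(axis=1)  # toy regression target

network = NeuralNetwork(input_nodes=3, hidden_nodes=14,
                        output_nodes=1, learning_rate=0.65)

for _ in range(300):             # a few hundred full-batch gradient steps
    network.train(features, targets)

preds = network.run(features).flatten()
print('MSE after training: {:.5f}'.format(np.mean((preds - targets) ** 2)))
```

If the implementation is correct, the reported MSE should be small, since the network only has to learn a simple average of its inputs.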
The code above is the completed `my_answers.py`; the project notebook `Your_first_neural_network.ipynb` imports it and drives the actual training.
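For context, here is a minimal sketch of how the notebook side might consume the module. The names `train_features`, `train_targets`, the `'cnt'` target column, and the batch size of 128 are assumptions about the notebook's data-preparation cells, which are not shown in this post:

```python
import numpy as np
from my_answers import NeuralNetwork, iterations, learning_rate, hidden_nodes, output_nodes

# train_features / train_targets are assumed to be pandas DataFrames built
# earlier in the notebook; 'cnt' is assumed to be the target column.
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

for ii in range(iterations):
    # Train on a random mini-batch of records each iteration.
    batch = np.random.choice(train_features.index, size=128)
    X = train_features.loc[batch].values
    y = train_targets.loc[batch, 'cnt']
    network.train(X, y)
```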