Added Gradient Descent [Python] #348
@@ -0,0 +1,121 @@
""" | ||
Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis function. | ||
""" | ||
import numpy | ||
|
||
# List of input, output pairs | ||
train_data = (((5, 2, 3), 15), ((6, 5, 9), 25), | ||
((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41)) | ||
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150)) | ||
parameter_vector = [2, 4, 1, 5] | ||
m = len(train_data) | ||
LEARNING_RATE = 0.009 | ||
|
||
|
||
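Concretely, given the 3-feature inputs and the 4-entry parameter_vector above, the linear hypothesis being fit is h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3, where theta_0 is the biased parameter described in _hypothesis_value below.
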
def _error(example_no, data_set='train'):
    """
    :param data_set: train data or test data
    :param example_no: example number whose error has to be checked
    :return: error in the example pointed to by example_no
    """
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)

def _hypothesis_value(data_input_tuple):
    """
    Calculates hypothesis function value for a given input
    :param data_input_tuple: Input tuple of a particular example
    :return: Value of hypothesis function at that point.
    Note that there is an input whose value is fixed as 1.
    It is known as the 'biased input' in ML terminology, and the parameter
    associated with it is known as the 'biased parameter'.
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val = hyp_val + data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val = hyp_val + 1 * parameter_vector[0]
Review comment: @Prakash2403 Sir, why is this 1*parameter_vector[0]?

Review comment: Line 35 and 36 can be changed to a single expression.

Author reply: As I have mentioned in the comments, there is always a biased input in Artificial Neural Networks or any ML hypothesis, whose value is fixed as 1. I wanted to explicitly mention this fact in the code too; that's why I have written 1*parameter_vector[0]. But now, I guess it's better to keep it as parameter_vector[0].
    return hyp_val
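Following up on the thread above, a compact form of lines 35 and 36 might look like the sketch below. This is only an assumption, since the reviewer's exact suggestion does not appear in the thread, and the helper name is illustrative; it reuses the module-level parameter_vector and the existing numpy import.

def _hypothesis_value_vectorized(data_input_tuple):
    # Sketch, not part of the PR: prepend the fixed biased input (1) and take
    # a single dot product with the parameter vector; equivalent to the
    # loop-and-add version above.
    features = numpy.asarray((1,) + tuple(data_input_tuple))
    return float(numpy.dot(features, numpy.asarray(parameter_vector)))
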
def output(example_no, data_set):
    """
    :param data_set: test data or train data
    :param example_no: example whose output is to be fetched
    :return: output for that example
    """
    if data_set == 'train':
        return train_data[example_no][1]
    elif data_set == 'test':
        return test_data[example_no][1]

def calculate_hypothesis_value(example_no, data_set):
    """
    Calculates hypothesis value for a given example
    :param data_set: test data or train data
    :param example_no: example whose hypothesis value is to be calculated
    :return: hypothesis value for that example
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])

def summation_of_cost_derivative(index, end=m):
    """
    Calculates the sum of the cost function derivative
    :param index: index of the parameter with respect to which the derivative is being calculated
    :param end: value where the summation ends, default is m, the number of examples
    :return: the summation of the cost derivative
    Note: If index is -1, this means we are calculating the summation with respect to the biased parameter.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value

def get_cost_derivative(index):
    """
    :param index: index of the parameter vector with respect to which the derivative is to be calculated
    :return: derivative with respect to that index
    Note: If index is -1, this means we are calculating the derivative with respect to the biased parameter.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
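For reference, assuming the usual mean squared error cost for a linear hypothesis, J = (1/(2m)) * sum over i of (h(x^i) - y^i)^2, the value returned by get_cost_derivative is its partial derivative with respect to one parameter: (1/m) * sum over i of (h(x^i) - y^i) * x_j^i, where the component x_j^i is fixed at 1 for the biased parameter (the index == -1 case above).
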
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for the predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1

Review comment: j = j+1 can be changed to j += 1.

        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - \
                LEARNING_RATE * cost_derivative
        if numpy.allclose(parameter_vector, temp_parameter_vector,
                          atol=absolute_error_limit, rtol=relative_error_limit):
            break
        parameter_vector = temp_parameter_vector
    print("Number of iterations:", j)
def test_gradient_descent():
    for i in range(len(test_data)):
        print("Actual output value:", output(i, 'test'))
        print("Hypothesis output:", calculate_hypothesis_value(i, 'test'))

if __name__ == '__main__':
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
Review comment: change 'inn' to 'in'.
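For comparison, the whole update loop can also be written in a vectorized form. The sketch below is illustrative only (the function name and signature are not part of the PR); it assumes the same data layout, learning rate, and stopping rule as the code above.

import numpy

def run_gradient_descent_vectorized(train_data, parameter_vector,
                                    learning_rate=0.009, atol=0.000002):
    # Design matrix with a leading column of ones (the biased input) and the
    # vector of outputs, built from the (input, output) pairs.
    x = numpy.array([(1,) + tuple(inp) for inp, out in train_data], dtype=float)
    y = numpy.array([out for inp, out in train_data], dtype=float)
    theta = numpy.array(parameter_vector, dtype=float)
    m = len(train_data)
    iterations = 0
    while True:
        iterations += 1
        error = x.dot(theta) - y          # h(x) - y for every example at once
        gradient = x.T.dot(error) / m     # replaces the per-index summation
        new_theta = theta - learning_rate * gradient
        if numpy.allclose(theta, new_theta, atol=atol, rtol=0):
            break
        theta = new_theta
    return theta, iterations

Called as run_gradient_descent_vectorized(train_data, parameter_vector), this should converge to the same parameters as run_gradient_descent above, since each gradient component equals the corresponding get_cost_derivative value.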