# coding:utf-8
import logging

import autograd.numpy as np
from autograd import grad

from mla.base import BaseEstimator
from mla.metrics.metrics import mean_squared_error, binary_crossentropy

np.random.seed(1000)


class BasicRegression(BaseEstimator):
    def __init__(self, lr=0.001, penalty="None", C=0.01, tolerance=0.0001, max_iters=1000):
        """Basic class for implementing continuous regression estimators which
        are trained with gradient descent optimization on their particular loss
        function.

        Parameters
        ----------
        lr : float, default 0.001
            Learning rate.
        penalty : str, {'l1', 'l2', 'None'}, default 'None'
            Regularization function name.
        C : float, default 0.01
            The regularization coefficient.
        tolerance : float, default 0.0001
            If the decrease in the loss between two consecutive iterations is
            smaller than `tolerance`, stop the optimization process.
        max_iters : int, default 1000
            The maximum number of iterations.
        """
        self.C = C
        self.penalty = penalty
        self.tolerance = tolerance
        self.lr = lr
        self.max_iters = max_iters
        self.errors = []
        self.theta = []
        self.n_samples, self.n_features = None, None
        self.cost_func = None

    def _loss(self, w):
        raise NotImplementedError()

    def init_cost(self):
        raise NotImplementedError()

    def _add_penalty(self, loss, w):
        """Apply regularization to the loss (the bias term w[0] is not penalized)."""
        if self.penalty == "l1":
            loss += self.C * np.abs(w[1:]).sum()
        elif self.penalty == "l2":
            loss += (0.5 * self.C) * (w[1:] ** 2).sum()
        return loss

    def _cost(self, X, y, theta):
        prediction = X.dot(theta)
        error = self.cost_func(y, prediction)
        return error

    def fit(self, X, y=None):
        self._setup_input(X, y)
        self.init_cost()
        self.n_samples, self.n_features = X.shape

        # Initialize weights + bias term
        self.theta = np.random.normal(size=(self.n_features + 1), scale=0.5)

        # Add an intercept column
        self.X = self._add_intercept(self.X)

        self._train()

    @staticmethod
    def _add_intercept(X):
        b = np.ones([X.shape[0], 1])
        return np.concatenate([b, X], axis=1)

    def _train(self):
        self.theta, self.errors = self._gradient_descent()
        logging.info(" Theta: %s" % self.theta.flatten())

    def _predict(self, X=None):
        X = self._add_intercept(X)
        return X.dot(self.theta)

    def _gradient_descent(self):
        theta = self.theta
        errors = [self._cost(self.X, self.y, theta)]

        # Get the derivative of the loss function
        cost_d = grad(self._loss)
        for i in range(1, self.max_iters + 1):
            # Calculate the gradient and update theta
            delta = cost_d(theta)
            theta -= self.lr * delta

            errors.append(self._cost(self.X, self.y, theta))
            logging.info("Iteration %s, error %s" % (i, errors[i]))

            error_diff = np.linalg.norm(errors[i - 1] - errors[i])
            if error_diff < self.tolerance:
                logging.info("Convergence has been reached.")
                break
        return theta, errors
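

# --- Illustrative sketch (not part of the original module) ---
# How `grad` is used in `_gradient_descent` above: given a scalar-valued loss
# written with autograd.numpy operations, `grad(loss)` returns a new function
# that evaluates d(loss)/d(theta). The helper below is hypothetical, never
# called by the estimators, and only demonstrates that mechanism.
def _autograd_gradient_example():
    toy_X = np.array([[1.0, 2.0], [1.0, 3.0]])  # intercept column + one feature
    toy_y = np.array([5.0, 7.0])

    def toy_loss(theta):
        # Mean squared error of a linear model, differentiable by autograd
        return np.mean((np.dot(toy_X, theta) - toy_y) ** 2)

    toy_grad = grad(toy_loss)      # function computing d(toy_loss)/d(theta)
    return toy_grad(np.zeros(2))   # gradient evaluated at theta = [0, 0]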


class LinearRegression(BasicRegression):
    """Linear regression with gradient descent optimizer."""

    def _loss(self, w):
        loss = self.cost_func(self.y, np.dot(self.X, w))
        return self._add_penalty(loss, w)

    def init_cost(self):
        self.cost_func = mean_squared_error


class LogisticRegression(BasicRegression):
    """Binary logistic regression with gradient descent optimizer."""

    def init_cost(self):
        self.cost_func = binary_crossentropy

    def _loss(self, w):
        loss = self.cost_func(self.y, self.sigmoid(np.dot(self.X, w)))
        return self._add_penalty(loss, w)

    @staticmethod
    def sigmoid(x):
        # Numerically stable sigmoid: 1 / (1 + exp(-x)) rewritten through tanh,
        # using the identity sigmoid(x) = 0.5 * (tanh(x / 2) + 1).
        return 0.5 * (np.tanh(0.5 * x) + 1)

    def _predict(self, X=None):
        X = self._add_intercept(X)
        return self.sigmoid(X.dot(self.theta))
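

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal end-to-end example on synthetic data, assuming `BaseEstimator`
# exposes a public `predict()` that delegates to `_predict()` (the usual
# convention in this package). All variable names and hyperparameter values
# below are hypothetical and chosen only for demonstration.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    # Synthetic regression problem: y = 3 * x + 1 + noise
    X_reg = np.random.uniform(-1, 1, size=(100, 1))
    y_reg = 3 * X_reg[:, 0] + 1 + np.random.normal(scale=0.1, size=100)

    lin = LinearRegression(lr=0.1, max_iters=500, penalty="l2", C=0.001)
    lin.fit(X_reg, y_reg)
    print("Linear regression theta (bias, weight):", lin.theta)

    # Synthetic binary classification problem separable along the first feature
    X_clf = np.random.normal(size=(100, 2))
    y_clf = (X_clf[:, 0] > 0).astype(float)

    log_reg = LogisticRegression(lr=0.1, max_iters=500)
    log_reg.fit(X_clf, y_clf)
    print("Predicted probabilities (first 5):", log_reg.predict(X_clf[:5]))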