A simple example of perceptrons and logistic regression for learning decision boundaries. I compare my own implementations against scikit-learn's.
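For orientation, these are the update rules the implementations below use, with learning rate (gain) eta; this is my summary of the standard rules, matching the code:

perceptron, on a misclassified (x, y):  theta <- theta + eta * y * x
least squares (delta rule), batch:      theta <- theta - eta * X^T (X theta - y)
logistic regression, gradient ascent:   theta <- theta + eta * X^T (y - sigmoid(X theta))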
%matplotlib inline
from sklearn.linear_model import Perceptron
import matplotlib.pyplot as plt
import numpy as np
X = [-5,-4,-3,-2,-1,1,2,3,4,5]
Y = [1,1,1,1,1,-1,-1,-1,-1,-1]
X = np.array([[1, x] for x in X])
fig, ax = plt.subplots()
colormap = np.array(['r', 'b'])
ax.scatter(X[:, 1], Y, c=colormap[(np.array(Y) + 1) // 2])  # -1 -> 'r', 1 -> 'b'
def perceptron_okay(X, Y, gain=0.01):
    """Classic perceptron: update theta only on misclassified samples."""
    theta = np.zeros(len(X[0]))
    error = 1
    epoch = 0
    while error != 0:
        error = len(Y)
        for i in range(len(Y)):
            y_hat = 1 if np.dot(X[i], theta) >= 0 else -1
            if y_hat == Y[i]:
                error -= 1
            else:
                # perceptron rule: move theta toward the true label
                theta += gain * Y[i] * X[i]
        epoch += 1
        if epoch > 1000:
            break
    return theta
def perceptron_lr(X, Y, gain=0.01):
    """Delta rule (LMS): batch gradient descent on the squared error."""
    theta = np.zeros(len(X[0]))
    error = 1
    epoch = 0
    while error != 0:
        # gradient of 0.5 * ||X @ theta - Y||^2 is X^T (X @ theta - Y)
        theta -= gain * np.dot(X.T, np.dot(X, theta) - Y)
        error = np.sum(np.sign(np.dot(X, theta)) != Y)
        epoch += 1
        if epoch > 1000:
            break
    return theta
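Since perceptron_lr is just gradient descent on the squared error, the closed-form least-squares solution gives a reference point to compare against (np.linalg.lstsq is NumPy's standard solver; this check is my addition):

theta_ls, *_ = np.linalg.lstsq(X, Y, rcond=None)  # solves min ||X @ theta - Y||^2
print('closed form:', theta_ls)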
clf = Perceptron(tol=1e-3, random_state=0)
clf.fit(X, Y)
theta_okay = perceptron_okay(X, Y)
theta_lr = perceptron_lr(X, Y)
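A quick sanity check (my addition, not part of the original): since the data are linearly separable, both hand-rolled fits should classify every training point correctly.

for name, theta_hat in [('okay', theta_okay), ('lr', theta_lr)]:
    preds = np.where(np.dot(X, theta_hat) >= 0, 1, -1)
    print(name, 'training accuracy:', np.mean(preds == Y))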
fig, ax = plt.subplots()
colormap = np.array(['r', 'b'])
ax.scatter(X[:, 1], Y, c=colormap[(np.array(Y) + 1) // 2])
line_x = np.arange(-5, 5, 0.05)
plot_x = np.array([[1, x] for x in line_x])
ax.plot(line_x, np.dot(plot_x, theta_okay), label='okay')
ax.plot(line_x, np.dot(plot_x, theta_lr), label='lr')
ax.plot(line_x, np.dot(plot_x, clf.coef_[0]) + clf.intercept_[0], label='sklearn')
ax.legend()
from sklearn.linear_model import LogisticRegression
def logistic_regression(X, Y, alpha=0.01):
    """Batch gradient ascent on the Bernoulli log-likelihood."""
    alpha /= len(X)
    # map labels {-1, 1} -> {0, 1} to match the sigmoid model
    Y = (np.asarray(Y) + 1) // 2
    theta = np.ones(len(X[0]))
    theta_new = np.zeros(len(X[0]))
    i = 0
    # iterate until every component of theta has stopped moving
    while np.any(abs(theta_new - theta) > 1e-3):
        theta = theta_new
        h_theta = 1 / (1 + np.exp(-np.dot(X, theta)))  # sigmoid predictions
        # log-likelihood gradient: X^T (y - h)
        theta_new = theta + alpha * np.dot(X.T, Y - h_theta)
        i += 1
        if i > 1000:
            break
    return theta_new
theta = logistic_regression(X, Y)
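In one dimension the fitted boundary sits where the sigmoid crosses 0.5, i.e. where theta[0] + theta[1] * x = 0 (this check is my addition):

print('boundary at x =', -theta[0] / theta[1])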
lf = LogisticRegression(random_state=0).fit(X, Y)
fig, ax = plt.subplots()
colormap = np.array(['r', 'b'])
ax.scatter(X[:, 1], Y, c=colormap[(np.array(Y) + 1) // 2])
line_x = np.arange(-5, 5, 0.05)
plot_x = np.array([[1, x] for x in line_x])
ax.plot(line_x, np.dot(plot_x, theta), label='me')
ax.plot(line_x, np.dot(plot_x, lf.coef_[0]) + lf.intercept_[0], label='sklearn')
ax.legend()
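As a final comparison (my addition; predict_proba is sklearn's standard API), the fitted probabilities can be printed side by side. They won't match exactly, since sklearn applies L2 regularization by default and my loop stops after at most 1000 small steps, but both should assign high P(y=1) to the negative-x points:

probs_mine = 1 / (1 + np.exp(-np.dot(X, theta)))
probs_sklearn = lf.predict_proba(X)[:, 1]  # column 1 is P(y = 1)
print(np.round(probs_mine, 3))
print(np.round(probs_sklearn, 3))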