import numpy as np
import matplotlib.pyplot as plt
class LogisticRegress:
    def __init__(self, theta):
        # Store the parameter vector (bias, x1, x2 and x1^2 coefficients)
        self.theta = theta

    def f(self, x):
        # Sigmoid of the linear combination x . theta
        return 1 / (1 + np.exp(-np.dot(x, self.theta)))
    def learning(self, x, y):
        epoch = 5000
        ETA = 1e-3
        count = 0
        accuracies = []
        for _ in range(epoch):
            # Gradient descent step: theta := theta - eta * (f(X) - y) . X
            self.theta = self.theta - ETA * np.dot(self.f(x) - y, x)
            result = self.classify(x) == y
            accuracy = len(result[result == True]) / len(result)
            accuracies.append(accuracy)
            count += 1
            # print("Times:{} theta:{}".format(count, self.theta))
        # Plot how training accuracy evolves over the iterations
        plt.plot(accuracies)
        plt.show()

    def classify(self, x):
        # Predict 1 when the sigmoid output is at least 0.5, otherwise 0
        return (self.f(x) >= 0.5).astype(int)

    def draw(self):
        x = np.linspace(-2.5, 2.5, 100)
        plt.plot(train_z[train_y == 1, 0], train_z[train_y == 1, 1], 'o')
        plt.plot(train_z[train_y == 0, 0], train_z[train_y == 0, 1], 'x')
        # Decision boundary: theta0 + theta1*x1 + theta2*x2 + theta3*x1^2 = 0, solved for x2
        plt.plot(x, -(self.theta[0] + self.theta[1] * x + self.theta[3] * (x ** 2)) / self.theta[2])
        plt.show()


def standardise(x):
    # Standardise each column using the training mean and standard deviation
    return (x - mu) / sigma


def to_matrix(x):
    # Design matrix: bias column, x1, x2 and x1^2 (for the curved boundary)
    x0 = np.ones([x.shape[0], 1])
    return np.hstack([x0, x, x[:, 0, np.newaxis] ** 2])


if __name__ == "__main__":
    train = np.loadtxt("data3.csv", delimiter=',', skiprows=1)
    train_x = train[:, 0:2]
    train_y = train[:, 2]
    theta = np.random.rand(4)
    mu = train_x.mean(axis=0)
    sigma = train_x.std(axis=0)
    train_z = standardise(train_x)
    X = to_matrix(train_z)
    # Scatter plot of the standardised training data
    plt.plot(train_z[train_y == 1, 0], train_z[train_y == 1, 1], 'o')
    plt.plot(train_z[train_y == 0, 0], train_z[train_y == 0, 1], 'x')
    plt.show()
    LR = LogisticRegress(theta)
    LR.learning(X, train_y)
    LR.draw()
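    # Illustrative sketch (not part of the original script): classify a couple
    # of new points with the trained model. The coordinate values below are
    # made up purely for demonstration; mu, sigma and LR are reused from the
    # steps above, so this only runs after training has finished.
    new_x = np.array([[200.0, 100.0], [120.0, 250.0]])
    new_z = standardise(new_x)
    print(LR.classify(to_matrix(new_z)))  # array of 0/1 predictions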