import numpy as np
from functions import sigmoid, sigmoid_grad, softmax, cross_entropy_error
class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std):
        # Initialize parameters: small random Gaussian weights, zero biases
        self.dict = {}
        self.dict['w1'] = weight_init_std * np.random.randn(input_size, hidden_size)   # e.g. 0.01 * (784, 50)
        self.dict['b1'] = np.zeros(hidden_size)                                        # zero vector of shape (50,)
        self.dict['w2'] = weight_init_std * np.random.randn(hidden_size, output_size)  # e.g. 0.01 * (50, 10)
        self.dict['b2'] = np.zeros(output_size)                                        # zero vector of shape (10,)
    def predict(self, x):
        # Forward pass: affine -> sigmoid -> affine -> softmax
        w1, w2 = self.dict['w1'], self.dict['w2']
        b1, b2 = self.dict['b1'], self.dict['b2']
        a1 = np.dot(x, w1) + b1   # hidden-layer pre-activation
        z1 = sigmoid(a1)          # hidden-layer activation
        a2 = np.dot(z1, w2) + b2  # output-layer pre-activation
        y = softmax(a2)           # class probabilities
        return y
    def loss(self, y, t):
        # Mean cross-entropy over the batch; y are softmax outputs,
        # t are one-hot labels (converted to class indices below)
        t = t.argmax(axis=1)
        num = y.shape[0]
        s = y[np.arange(num), t]                # predicted probability of the true class
        return -np.sum(np.log(s + 1e-7)) / num  # small epsilon guards against log(0)
    def gradient(self, x, t):
        # Backpropagation for the two-layer net; t is one-hot encoded
        w1, w2 = self.dict['w1'], self.dict['w2']
        b1, b2 = self.dict['b1'], self.dict['b2']
        grads = {}

        # Forward pass (repeated so intermediate values are available for backprop)
        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        y = softmax(a2)

        # Backward pass
        num = x.shape[0]
        dy = (y - t) / num              # gradient of softmax + cross-entropy w.r.t. a2
        grads['w2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        da1 = np.dot(dy, w2.T)          # propagate through the second affine layer
        dz1 = sigmoid_grad(a1) * da1    # and through the sigmoid
        grads['w1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)
        return grads
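
    # Optional sanity check (an added sketch, not part of the original file):
    # compare the analytic gradients above against slow finite-difference
    # estimates on a small batch. It assumes the same self.dict parameter
    # layout and the loss(y, t) signature defined above.
    def numerical_gradient(self, x, t, eps=1e-4):
        grads = {}
        for key in ('w1', 'b1', 'w2', 'b2'):
            param = self.dict[key]
            grad = np.zeros_like(param)
            it = np.nditer(param, flags=['multi_index'])
            while not it.finished:
                idx = it.multi_index
                orig = param[idx]
                param[idx] = orig + eps              # perturb one parameter up
                loss_plus = self.loss(self.predict(x), t)
                param[idx] = orig - eps              # and down
                loss_minus = self.loss(self.predict(x), t)
                grad[idx] = (loss_plus - loss_minus) / (2 * eps)
                param[idx] = orig                    # restore the original value
                it.iternext()
            grads[key] = grad
        return grads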
    def accuracy(self, x, t):
        # Fraction of samples whose predicted class matches the label
        y = self.predict(x)
        p = np.argmax(y, axis=1)   # predicted classes
        q = np.argmax(t, axis=1)   # true classes (from one-hot labels)
        acc = np.sum(p == q) / len(y)
        return acc
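
The functions module imported at the top is a separate helper file in this repository and is not shown above. A minimal sketch of what it presumably provides, written with the standard definitions (the actual functions.py may differ in detail):

import numpy as np

def sigmoid(x):
    # logistic activation: 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-x))

def sigmoid_grad(x):
    # derivative of the sigmoid expressed through the activation itself
    s = sigmoid(x)
    return s * (1 - s)

def softmax(x):
    # row-wise softmax; subtracting the row max keeps the exponentials stable
    x = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(x)
    return e / np.sum(e, axis=-1, keepdims=True)

def cross_entropy_error(y, t):
    # mean cross-entropy for a batch; t may be one-hot encoded
    if t.ndim > 1:
        t = t.argmax(axis=1)
    num = y.shape[0]
    return -np.sum(np.log(y[np.arange(num), t] + 1e-7)) / num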
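
A minimal end-to-end sketch of how the class above can be trained with mini-batch SGD. The data arrays below are random stand-ins with MNIST shapes so the snippet runs on its own; in the real project they would come from whatever MNIST loader the repository uses, and the hyperparameters are illustrative only.

import numpy as np

# Stand-in data with MNIST shapes (the real project would load MNIST here):
# inputs are flattened 28x28 images, labels are one-hot over 10 classes.
x_train = np.random.rand(1000, 784)
t_train = np.eye(10)[np.random.randint(0, 10, 1000)]
x_test = np.random.rand(200, 784)
t_test = np.eye(10)[np.random.randint(0, 10, 200)]

# TwoLayerNet is the class defined above
net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10, weight_init_std=0.01)

iters = 10000          # illustrative hyperparameters
batch_size = 100
learning_rate = 0.1

for i in range(iters):
    # sample a random mini-batch
    batch_mask = np.random.choice(x_train.shape[0], batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # backprop gradients, then a plain SGD parameter update
    grads = net.gradient(x_batch, t_batch)
    for key in ('w1', 'b1', 'w2', 'b2'):
        net.dict[key] -= learning_rate * grads[key]

    # occasionally report the batch loss and test accuracy
    if i % 1000 == 0:
        loss = net.loss(net.predict(x_batch), t_batch)
        print(i, loss, net.accuracy(x_test, t_test))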