AMO全媒体运营
首页 IT/编程 神经网络

神经网络

英文提示词: import numpy as np import utils # Forward Neural Network (FNN) class FNN: # layers …

英文提示词:

import numpy as np

import utils


# Forward Neural Network (FNN)
class FNN:
    """Fully-connected feed-forward network.

    ``shape`` is ``[input_dim, hidden..., class_num]``; hidden layers use
    ReLU, the output layer uses ``activation`` ('sigmoid' or 'softmax').
    Training is plain mini-batch SGD driven by ``forward`` + ``backward``.
    """

    # layers = 3 and shape = [input dim, xx, class num]
    def __init__(self, shape, activation='sigmoid') -> None:
        self._l = len(shape) - 1     # number of weight layers
        self._w = []                 # weight matrices, one per layer
        self._b = []                 # bias row-vectors, one per layer
        self._z = [None] * self._l   # pre-activations (w.x + b), filled by forward
        self._a = [None] * self._l   # activations, filled by forward
        self._x = None               # cached (normalized) input batch
        self._n = None               # batch size of the last forward pass
        self._m = None               # per-feature mean for normalization
        self._v = None               # per-feature variance for normalization
        self._act = activation      # activation function for the output layer
        # He initialization: appropriate for the ReLU hidden layers.
        for ly in range(self._l):
            self._w.append(np.random.randn(shape[ly], shape[ly + 1])
                           * np.sqrt(2 / shape[ly]))
            self._b.append(np.zeros((1, shape[ly + 1])))
        assert self._l == len(self._w) == len(self._b)

    def set_normalize(self, m, v):
        """Store mean/variance so that future inputs are normalized."""
        self._m = m
        self._v = v

    def forward(self, _x):
        """Forward pass over a batch; returns the output-layer activations."""
        if self._m is None:
            self._x = _x
        else:
            self._x = utils.do_normalize(_x, self._m, self._v)
        self._n = self._x.shape[0]
        # linear -> relu -> ... -> linear -> sigmoid/softmax
        self._z[0] = np.dot(self._x, self._w[0]) + self._b[0]
        for ly in range(1, self._l):
            self._a[ly - 1] = utils.relu(self._z[ly - 1])
            self._z[ly] = np.dot(self._a[ly - 1], self._w[ly]) + self._b[ly]
        last = self._l - 1
        if self._act == 'softmax':
            self._a[last] = utils.softmax(self._z[last])
        else:
            self._a[last] = utils.sigmoid(self._z[last])
        return self._a[last]

    # if activation function is 'softmax', we use cee
    # else if activation func is 'sigmoid', we use mse
    def backward(self, _y, lr=0.1):
        """One SGD update from the cached forward pass.

        softmax output -> cross-entropy loss; sigmoid output -> MSE.
        Must be called after ``forward`` (uses the cached ``_x``/``_a``).
        """
        # cee: for softmax + cross-entropy the output gradient is (a - y).
        dz = self._a[self._l - 1] - _y
        # mse: multiply by the sigmoid derivative a * (1 - a).
        if self._act != 'softmax':
            dz = dz * self._a[self._l - 1] * (1 - self._a[self._l - 1])
        for ly in reversed(range(1, self._l)):
            dw = 1. / self._n * np.dot(self._a[ly - 1].T, dz)
            db = 1. / self._n * np.sum(dz, axis=0)
            self._w[ly] -= lr * dw
            self._b[ly] -= lr * db
            da = np.dot(dz, self._w[ly].T)
            dz = da.copy()
            dz[self._a[ly - 1] <= 0] = 0  # ReLU derivative: zero where input <= 0
        dw = 1. / self._n * np.dot(self._x.T, dz)
        db = 1. / self._n * np.sum(dz, axis=0)
        self._w[0] -= lr * dw
        self._b[0] -= lr * db

    # save model weights which in 'datasetName.txt'
    def save(self, _name):
        """Write all weight matrices, then all bias rows, one per line."""
        # Context manager guarantees the file is closed even on error.
        with open(_name + '.txt', 'w') as file:
            for fp in self._w:
                for i in range(fp.shape[0]):
                    for j in range(fp.shape[1]):
                        file.write(str(fp[i][j]))
                        file.write(' ')
                file.write('\n')
            for fp in self._b:
                for i in range(fp.shape[0]):
                    for j in range(fp.shape[1]):
                        file.write(str(fp[i][j]))
                        file.write(' ')
                file.write('\n')

    # load model weights which in 'datasetName.txt'
    def load(self, _path):
        """Load weights/biases written by ``save``; shapes must match."""
        with open(_path, 'r') as file:
            s = file.readlines()
        # One line per weight matrix plus one per bias row.
        assert len(s) == len(self._w) * 2
        n = len(s) // 2
        for i in range(n):
            vals = list(map(float, s[i].strip().split(' ')))
            arr = np.array(vals).reshape(self._w[i].shape)
            assert arr.shape == self._w[i].shape
            self._w[i] = arr
            vals = list(map(float, s[i + n].strip().split(' ')))
            arr = np.array(vals).reshape(self._b[i].shape)
            assert arr.shape == self._b[i].shape
            self._b[i] = arr

大语言模型链接:

ChatGPT / Claude-2 / 文心一言 / 讯飞星火 / 360智脑

生成海报
免责声明:文章内容不代表本站立场,本站不对其内容的真实性、完整性、准确性给予任何担保、暗示和承诺,仅供读者参考,文章版权归原作者所有。如本文内容影响到您的合法权益(内容、图片等),请及时联系本站,我们会及时删除处理。
域名
Avatar photo

作者: AdobeEdu

AdobeEDU设计网址导航,为设计师提供无限的创意参考,欢迎大家同时关注微信公众号:UID的小伙伴们

为您推荐

JSON

中文释义: 请帮我把JSON格式的文本翻译成简体中文,并且按照JSON格式原样返回,请不要将不同的键值混在一起返回.同时...

c++初学者的ai百科全书

中文释义: 我想让你作为一名c++全栈开发高级工程师,熟悉了解c++全栈过程,熟读了 《c++ primer》等经典书籍...

python中文工程师

中文释义: 假设你模仿一名精通Excel和WPS的中文工程师,擅长Python和写中文注释,接下来你将教导我解决问题,还...

python中文工程师(英文提示语)

中文释义: 假设你模仿一个精通Excel和WPS,擅长Python和写中文注释的中国工程师,接下来你要教我解决问题,我不...

将文章内容整理为mermaid代码

👉 将文章内容整理为 Mermaid 代码，用于展示文章结构 Contributed 中文释义: 作为 Mermaid 图表语法专家，你的任务...

发表回复

返回顶部