栏目分类:
子分类:
返回
名师互学网用户登录
快速导航关闭
当前搜索
当前分类
子分类
实用工具
热门搜索
名师互学网 > IT > 软件开发 > 后端开发 > Python

全连接神经网络

Python 更新时间: 发布时间: IT归档 最新发布 模块sitemap 名妆网 法律咨询 聚返吧 英语巴士网 伯小乐 网商动力

全连接神经网络

import numpy as np

# Learning rate for gradient descent: Layer.setOmega updates each weight
# as w -= delta * gradient.
delta = 1.0

class Neuron:
    """A single neuron: one weight per input coming from the previous layer.

    Weights are initialised uniformly at random in [0, 1).
    """

    def __init__(self, lastLayerNodeNumber: int) -> None:
        self.n = lastLayerNodeNumber  # expected number of inputs
        self.w = [np.random.rand() for _ in range(lastLayerNodeNumber)]

    @staticmethod
    def sigmod(x: float) -> float:
        """Standard logistic sigmoid 1 / (1 + e^-x).

        BUGFIX: the original computed 1 / (1 + exp(x)), i.e. 1 - sigmoid(x).
        The gradient code in this file uses y * (1 - y), which is the
        derivative of the *standard* sigmoid, so the standard form is the
        consistent one.  (The historical misspelling "sigmod" is kept so
        existing callers keep working.)
        """
        return 1.0 / (1.0 + np.exp(-x))

    def net(self, x: list) -> float:
        """Weighted sum (inner product) of input x with this neuron's weights."""
        return np.inner(np.array(x), self.w)

    def activate(self, x: list) -> float:
        """Sigmoid activation of the net input.

        Raises:
            ValueError: if len(x) does not match the configured input size.
                (Originally print + exit(0); exit code 0 wrongly signalled
                success on a fatal input error.)
        """
        if len(x) != self.n:
            raise ValueError('神经元的输入不合法!')
        return self.sigmod(self.net(x))

    def print(self) -> None:
        """Dump this neuron's weight list to stdout."""
        print(self.w)


class Layer():
    """A fully-connected layer: a list of neurons sharing the same input."""

    def __init__(self, neuronNum: int, lastLayerNeuronNum: int) -> None:
        self.neuronNum = neuronNum
        self.neurons = [Neuron(lastLayerNeuronNum) for _ in range(neuronNum)]
        self.lastLayerNeuronNum = lastLayerNeuronNum

    def output(self, lastLayerOutput: list) -> list:
        """Activate every neuron on the previous layer's output vector."""
        if len(lastLayerOutput) != self.lastLayerNeuronNum:
            print('网络层的输入不合法!')
            exit(0)
        return [neuron.activate(lastLayerOutput) for neuron in self.neurons]

    def setOmega(self, omega: list) -> None:
        """Gradient-descent step: subtract delta * omega[i][j] from each weight."""
        for idx, neuron in enumerate(self.neurons):
            for j in range(neuron.n):
                neuron.w[j] = neuron.w[j] - delta * omega[idx][j]

    def print(self) -> None:
        """Dump every neuron's weights, one line per neuron."""
        for neuron in self.neurons:
            neuron.print()


class NeuralNetwork():
    """Three-layer fully-connected network: input -> hidden -> output.

    Layer indices used in method names: 1 = input, 2 = hidden, 3 = output.
    Output-layer sigmoid activations are rescaled from (0, 1) to (-1, 1)
    in getOutputLayerOutput.  Training is per-sample gradient descent.
    """

    def __init__(self, inputNum: int, hiddenLayerNeuronNum: int, outputLayerNeuronNum: int) -> None:
        self.inputNum = inputNum
        self.hiddenLayerNeuronNum = hiddenLayerNeuronNum
        self.outputNum = outputLayerNeuronNum
        self.hiddenLayer = Layer(hiddenLayerNeuronNum, inputNum)
        self.outputLayer = Layer(outputLayerNeuronNum, hiddenLayerNeuronNum)

    def checkInputAndOutput(self, input: list = None, output: list = None) -> None:
        """Abort when a training sample's size disagrees with the network shape."""
        # BUGFIX: `is not None` instead of `!= None` — identity test is the
        # correct idiom for a None sentinel (`!=` invokes rich comparison).
        if input is not None and len(input) != self.inputNum:
            print('训练输入与设置不符!')
            exit(0)
        if output is not None and len(output) != self.outputNum:
            print('训练输出与设置不符!')
            exit(0)

    def getHiddenLayerOutput(self, input: list):
        """Forward pass through the hidden layer only."""
        return self.hiddenLayer.output(input)

    def getOutputLayerOutput(self, input: list):
        """Full forward pass; each sigmoid output is mapped from (0, 1) to (-1, 1)."""
        hiddenLayerOutput = self.getHiddenLayerOutput(input)
        res = self.outputLayer.output(hiddenLayerOutput)
        for i in range(len(res)):
            res[i] = res[i] * 2.0 - 1.0  # rescale (0, 1) -> (-1, 1)
        return res

    def partialErrorNet3k(self, input: list, output: list, kthNeuron: int) -> float:
        """d(error)/d(net) for output neuron k: 2 * (y - t) * y * (1 - y)."""
        outputLayerOutput = self.getOutputLayerOutput(input)
        y3k = outputLayerOutput[kthNeuron]
        tk = output[kthNeuron]
        return 2.0 * (y3k - tk) * y3k * (1.0 - y3k)

    # ithLayer selector — 2: hidden layer; any other value (callers pass 3):
    # output layer.  (The original comment said "1: hidden, 2: output",
    # which contradicted the code; the code's actual mapping is kept and
    # documented here.)
    def getOmega(self, ithLayer: int, jthNeuron: int, kthOmega: int) -> float:
        if ithLayer == 2:
            layer = self.hiddenLayer
        else:
            layer = self.outputLayer
        return layer.neurons[jthNeuron].w[kthOmega]

    def getGradient3ij(self, input: list, output: list, ithNeuron: int, jthOmega: int) -> float:
        """Gradient of the error w.r.t. output-layer weight (i, j).

        NOTE(review): the returned value omits the y2j factor (see the
        commented line) — kept exactly as the original author left it;
        confirm against the backpropagation derivation.
        """
        outputLayerOutput = self.getOutputLayerOutput(input)
        hiddenLayerOutput = self.getHiddenLayerOutput(input)
        y3i = outputLayerOutput[ithNeuron]
        # y2j = hiddenLayerOutput[jthOmega]
        ti = output[ithNeuron]
        return 2.0 * (y3i - ti) * y3i * (1.0 - y3i)

    def getGradient2ij(self, input: list, output: list, ithNeuron: int, jthOmega: int) -> float:
        """Gradient of the error w.r.t. hidden-layer weight (i, j).

        NOTE(review): the returned value omits the y1j = input[jthOmega]
        factor (see the commented lines) — kept as the original author
        left it; confirm against the derivation.
        """
        partialErrorNet3k = []
        for k in range(self.outputNum):
            partialErrorNet3k.append(self.partialErrorNet3k(input=input, output=output, kthNeuron=k))
        s = 0.0
        for k in range(self.outputNum):
            # BUGFIX: the original summed partialErrorNet3k[i], where `i`
            # was a stale variable left over from the previous loop; the
            # sum must index by the current k.
            s = s + partialErrorNet3k[k] * self.getOmega(3, k, ithNeuron)
        hiddenLayerOutput = self.getHiddenLayerOutput(input=input)
        y2i = hiddenLayerOutput[ithNeuron]
        # w2ij = self.getOmega(2, ithNeuron, jthOmega)
        # y1j = input[jthOmega]
        return s * y2i * (1 - y2i)
        # return s * y2i * (1 - y2i) * y1j

    def error(self, input: list, output: list) -> float:
        """Sum of squared errors over the output neurons for one sample."""
        outputLayerOutput = self.getOutputLayerOutput(input)
        err = 0.0
        for i in range(self.outputNum):
            err = err + (outputLayerOutput[i] - output[i]) ** 2
        return err

    def train(self, input: list, output: list) -> None:
        """One stochastic-gradient-descent step on a single (input, output) pair."""
        # Both layers' gradients are computed BEFORE any weight changes,
        # then applied — order preserved from the original.
        hiddenLayerGradient = []
        for i in range(self.hiddenLayerNeuronNum):
            layerGradient = []
            for j in range(self.inputNum):
                layerGradient.append(self.getGradient2ij(input=input, output=output, ithNeuron=i, jthOmega=j))
            hiddenLayerGradient.append(layerGradient)
        outputLayerGradient = []
        for i in range(self.outputNum):
            layerGradient = []
            for j in range(self.hiddenLayerNeuronNum):
                layerGradient.append(self.getGradient3ij(input=input, output=output, ithNeuron=i, jthOmega=j))
            outputLayerGradient.append(layerGradient)
        self.hiddenLayer.setOmega(hiddenLayerGradient)
        self.outputLayer.setOmega(outputLayerGradient)

    def print(self):
        """Dump all weights, layer by layer."""
        print('Hidden layer:')
        self.hiddenLayer.print()
        print('Output layer:')
        self.outputLayer.print()


def main():
    """Train a 2-2-1 network on the AND truth table and print the weights."""
    network = NeuralNetwork(inputNum=2, hiddenLayerNeuronNum=2, outputLayerNeuronNum=1)
    samples = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    targets = [[0.0], [0.0], [0.0], [1.0]]
    for step in range(4000):
        if step % 100 == 0:
            print(step)  # progress marker every 100 steps
        pick = np.random.randint(0, len(samples))
        network.train(input=samples[pick], output=targets[pick])
    network.print()


if __name__ == '__main__':
    main()
转载请注明:文章转载自 www.mshxw.com
本文地址:https://www.mshxw.com/it/269481.html
我们一直用心在做
关于我们 文章归档 网站地图 联系我们

版权所有 (c)2021-2022 MSHXW.COM

ICP备案号:晋ICP备2021003244-6号