from random import randrange
import math

# the first entry of each input is a 1 feeding the bias weight
inputs = [[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
targets = [0, 1, 1, 0]

# the first entry in h is the input for the bias weight of the output neuron
h = [1, 0.1, 0.1]
dh = [1, 0, 0]
eta = 0.2


def update(perceptron, target, output, x_input, h, dh, eta):
    # error at the (linear) output neuron
    dout = target - output
    # error at the hidden neurons; h[i]*(1-h[i]) is the sigmoid derivative
    # (dh[0] always comes out 0 because h[0] is the constant bias input of 1)
    for i in range(len(dh)):
        dh[i] = perceptron[1][i] * dout * h[i] * (1 - h[i])
    # update the output weights
    for i in range(len(perceptron[1])):
        perceptron[1][i] = perceptron[1][i] + eta * dout * h[i]
    # update the hidden weights (dh[i+1] skips the bias entry dh[0])
    for i in range(len(perceptron[0])):
        for j in range(len(perceptron[0][i])):
            perceptron[0][i][j] = perceptron[0][i][j] + eta * x_input[j] * dh[i + 1]


def train(perceptron, inputs, targets, h, dh, eta):
    # online training: pick a random example, run it forward, backpropagate
    for i in range(5000):
        idx = randrange(len(inputs))
        out = forward(perceptron, inputs[idx], h)
        update(perceptron, targets[idx], out, inputs[idx], h, dh, eta)
    return perceptron


def weightedsum(weights, values):
    return sum(weights[i] * values[i] for i in range(len(weights)))


def forward(perceptron, x_input, h, beta=1):
    # sigmoid activations for the hidden layer; h[0] stays 1 for the bias
    for i in range(len(perceptron[0])):
        h[i + 1] = 1.0 / (1.0 + math.exp(-beta * weightedsum(perceptron[0][i], x_input)))
    # the output neuron is linear
    return weightedsum(perceptron[1], h)


print('\nTRAIN XOR')
# initial weights: arbitrary small numbers
perceptron = [[[-0.1, -1.1, 0.4], [1.1, 0.5, -0.6]], [0.7, 0.3, -0.5]]
perceptron = train(perceptron, inputs, targets, h, dh, eta)
print("[0,0] ", forward(perceptron, inputs[0], h))
print("[0,1] ", forward(perceptron, inputs[1], h))
print("[1,0] ", forward(perceptron, inputs[2], h))
print("[1,1] ", forward(perceptron, inputs[3], h))
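
# A possible follow-up check, not part of the original listing: since the XOR
# targets are 0/1 but the network's output is a real number, thresholding the
# output at 0.5 turns it into a binary prediction that can be compared against
# each target directly.
for x, t in zip(inputs, targets):
    pred = 1 if forward(perceptron, x, h) > 0.5 else 0
    print(x[1:], "->", pred, "(target:", t, ")")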