3rd_AND_NOT_FUNCTION.py
import numpy as np
import operator

'''
Author: Priyansh Soni
'''
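## Truth table for the ANDNOT gate (y = x1 AND NOT x2), bipolar encoding:
##   x1  x2 | target
##    1   1 |  -1
##    1  -1 |   1
##   -1   1 |  -1
##   -1  -1 |  -1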
##ANDNOT GATE
def Testing(mat_inputs_s, mat_weight, bias):
    ##Runs every input pattern through the learned decision function and
    ##reports the bipolar output (net input <= 0 maps to -1, else to 1)
    print("------------------------")
    print("Testing for ANDNOT GATE")
    print("------------------------")
    for i in range(len(mat_inputs_s)):
        mat_inputs_x_i = list(mat_inputs_s[i].flat)
        val = mat_weight[0]*mat_inputs_x_i[0] + mat_weight[1]*mat_inputs_x_i[1] + bias
        if val <= 0:
            output = -1
        else:
            output = 1
        print("Input: " + str(mat_inputs_x_i) + " Output: " + str(output) + " (Value=" + str(val) + ")")
def HebbNet():
    ##Inputs for the ANDNOT gate (bipolar encoding)
    print("------------------------")
    print("HEBBNET")
    print("------------------------")
    mat_inputs_s = np.matrix([[1,1],[1,-1],[-1,1],[-1,-1]])
    ##Target vector corresponding to the inputs (ANDNOT: x1 AND NOT x2)
    mat_target_t = [-1,1,-1,-1]
    w1 = w2 = b = 0  ##Weights and bias start at 0, as the algorithm prescribes
    mat_weight = [w1, w2]
    for i in range(len(mat_inputs_s)):
        mat_inputs_x_i = list(mat_inputs_s[i].flat)
        output_y = mat_target_t[i]
        ##Hebb update: each weight moves by input*target, the bias by target
        for j in range(len(mat_weight)):
            mat_weight[j] = mat_weight[j] + mat_inputs_x_i[j]*output_y
        b = b + output_y
        print("Weights after Pattern " + str(i+1) + ": " + str(mat_weight))
        print("Bias:", b)
    print("------------------------")
    print("Final Weights:", mat_weight)
    print("Final Bias:", b)
    print("Equation making the problem linearly separable:")
    print(str(mat_weight[0]) + "*x1 + " + str(mat_weight[1]) + "*x2 + (" + str(b) + ") = 0")
    ###TESTING
    Testing(mat_inputs_s, mat_weight, b)
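## Worked trace of the Hebb updates above (w_j += x_j*t, b += t),
## starting from w = [0, 0], b = 0 with the ANDNOT targets:
##   Pattern 1: x=( 1, 1), t=-1  ->  w=[-1,-1], b=-1
##   Pattern 2: x=( 1,-1), t= 1  ->  w=[ 0,-2], b= 0
##   Pattern 3: x=(-1, 1), t=-1  ->  w=[ 1,-3], b=-1
##   Pattern 4: x=(-1,-1), t=-1  ->  w=[ 2,-2], b=-2
## The final boundary 2*x1 - 2*x2 - 2 = 0 classifies all four patterns correctly.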
def PerceptronRule(alpha, theta):
    print("------------------------")
    print("PERCEPTRON RULE")
    print("------------------------")
    ##Inputs for the ANDNOT gate; the learning rate alpha lies in (0, 1]
    mat_inputs_s = np.matrix([[1,1],[1,-1],[-1,1],[-1,-1]])
    ##Target vector corresponding to the inputs (ANDNOT: x1 AND NOT x2)
    mat_target_t = [-1,1,-1,-1]
    w1 = w2 = b = iterations = 0  ##Initial weights and bias 0 (random values work as well)
    mat_weight = [w1, w2]
    print("------------------------")
    print("Initial Weights:", mat_weight)
    print("------------------------")
    while True:
        iterations += 1
        updated = False  ##Tracks whether any pattern changed the weights or bias this epoch
        for i in range(len(mat_inputs_s)):
            mat_inputs_x_i = list(mat_inputs_s[i].flat)
            ##Computation of the response unit (y_in)
            y_in = b + sum(mat_inputs_x_i[j]*mat_weight[j] for j in range(len(mat_inputs_x_i)))
            ##Bipolar step activation with a dead zone of [-theta, theta]
            if y_in > theta:
                y = 1
            elif y_in < -theta:
                y = -1
            else:
                y = 0
            ##On error (y != target), both weights and the bias change
            if y != mat_target_t[i]:
                mat_weight = [mat_weight[j] + alpha*mat_target_t[i]*mat_inputs_x_i[j] for j in range(len(mat_weight))]
                b = b + alpha*mat_target_t[i]
                updated = True
                print("------------------------")
                print("Updated weights and bias:")
                print("Iteration: " + str(iterations) + ", Pattern: " + str(i+1))
                print("Weights:", mat_weight)
                print("Bias:", b)
                print("------------------------")
        ##Stopping condition: a full epoch with no updates
        if not updated:
            print("Stopping condition occurred. Number of iterations for convergence:", iterations)
            print("Final Weights:", mat_weight)
            print("Bias:", b)
            print("------------------------")
            break
    Testing(mat_inputs_s, mat_weight, b)
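## Note on convergence: the perceptron update w_j += alpha*t*x_j (applied only
## when the output disagrees with the target) is guaranteed to stop after
## finitely many corrections whenever the patterns are linearly separable,
## which ANDNOT is. The theta "dead zone" simply forces the net input to clear
## a margin of +/-theta before an output counts as +1 or -1.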
def DeltaRule(alpha):
    print("------------------------")
    print("DELTA RULE OF LEARNING")
    print("------------------------")
    ##Inputs for the ANDNOT gate; the learning rate alpha lies in (0, 1]
    mat_inputs_s = np.matrix([[1,1],[1,-1],[-1,1],[-1,-1]])
    ##Target vector corresponding to the inputs (ANDNOT: x1 AND NOT x2)
    mat_target_t = [-1,1,-1,-1]
    w1 = w2 = b = iterations = 0  ##Weights and bias may also be initialized randomly
    mat_weight = [w1, w2]
    print("------------------------")
    print("Initial Weights:", mat_weight)
    print("------------------------")
    ##Error threshold for the stopping condition
    error_threshold = 0.001
    max_iterations = 1000  ##Safety cap so a too-large alpha cannot loop forever
    while True:
        iterations += 1
        curr_weight = mat_weight  ##Weights at the start of this epoch
        for i in range(len(mat_inputs_s)):
            mat_inputs_x_i = list(mat_inputs_s[i].flat)
            ##Computation of the response unit (y_in)
            y_in = b + sum(mat_inputs_x_i[j]*mat_weight[j] for j in range(len(mat_inputs_x_i)))
            ##No activation function here: the activation is the identity (y = y_in)
            ##Weight and bias update proportional to the error (t - y_in)
            b = b + alpha*(mat_target_t[i] - y_in)
            mat_weight = [mat_weight[j] + alpha*(mat_target_t[i] - y_in)*mat_inputs_x_i[j] for j in range(len(mat_weight))]
        print("Updated weights and bias:")
        print("Iteration:", iterations)
        print("Weights:", mat_weight)
        print("Bias:", b)
        print("------------------------")
        ##Largest absolute weight change over this epoch
        largest_weight_change = max(map(abs, map(operator.sub, mat_weight, curr_weight)))
        ##Stopping condition
        if largest_weight_change < error_threshold:
            print("Stopping condition occurred. Number of iterations:", iterations)
            print("Final Weights:", mat_weight)
            print("Bias:", b)
            print("------------------------")
            break
        if iterations >= max_iterations:
            print("Maximum iterations reached without convergence.")
            break
    Testing(mat_inputs_s, mat_weight, b)
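## Why (t - y_in) appears in the update: the delta (LMS/Widrow-Hoff) rule is
## gradient descent on the squared error E = 0.5*(t - y_in)^2 with
## y_in = b + sum_j w_j*x_j. Differentiating, dE/dw_j = -(t - y_in)*x_j, so
## stepping against the gradient gives w_j += alpha*(t - y_in)*x_j and
## b += alpha*(t - y_in).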
HebbNet()
PerceptronRule(1, 0.1)  ##alpha=1, theta=0.1
DeltaRule(0.1)  ##Needs 0.1 <= n*alpha <= 1.0 for n=4 inputs (0.025 <= alpha <= 0.25); alpha=0.1