testing perceptrom.txt
print("X1:" + str(X1) + " X2:" + str(X2))
print("W1:" + str(W1) + " W2:" + str(W2))
print("bias: " + str(b))
print("label: " + str(y[c]))
///
import numpy as np

# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)

def stepFunction(t):
    if t >= 0:
        return 1
    return 0

def prediction(X, W, b):
    return stepFunction((np.matmul(X, W) + b)[0])

# TODO: Fill in the code below to implement the perceptron trick.
# The function should receive as inputs the data X, the labels y,
# the weights W (as an array), and the bias b,
# update the weights and bias W, b, according to the perceptron algorithm,
# and return W and b.
def perceptronStep(X, y, W, b, learn_rate = 0.01):
    c = 0
    for x in X:
        X1 = x[0]
        X2 = x[1]
        label = y[c]
        # Score of the point under the current line: W1*X1 + W2*X2 + b.
        score = W[0]*X1 + W[1]*X2 + b
        #print("Score: " + str(score))
        # Check whether the point is classified correctly.
        if score >= 0 and label == 0:
            # Classified as positive but labelled 0: subtract the point.
            W[0] = W[0] - X1*learn_rate
            W[1] = W[1] - X2*learn_rate
            b = b - learn_rate
        elif score < 0 and label == 1:
            # Classified as negative but labelled 1: add the point.
            W[0] = W[0] + X1*learn_rate
            W[1] = W[1] + X2*learn_rate
            b = b + learn_rate
        # Otherwise the point is already classified correctly: no change.
        c += 1
    return W, b
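
# Quick sanity check of perceptronStep on a single made-up misclassified point.
# The names X_demo, y_demo, W_demo and b_demo are illustrative only, not part
# of the original exercise.
X_demo = np.array([[1.0, 1.0]])     # one point at (1, 1) ...
y_demo = np.array([0])              # ... labelled 0
W_demo = np.array([[1.0], [1.0]])   # current line: x1 + x2 + 0 = 0
b_demo = 0.0
# score = 1*1 + 1*1 + 0 = 2 >= 0 but the label is 0, so one step should
# subtract the (scaled) point from the weights and the bias.
W_demo, b_demo = perceptronStep(X_demo, y_demo, W_demo, b_demo, learn_rate=0.1)
print(W_demo.T, b_demo)             # expected: [[0.9 0.9]] -0.1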

# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 1):
    x_min, x_max = min(X.T[0]), max(X.T[0])
    y_min, y_max = min(X.T[1]), max(X.T[1])
    W = np.array(np.random.rand(2, 1))
    b = np.random.rand(1)[0] + x_max
    # These are the solution lines that get plotted below.
    boundary_lines = []
    for i in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        boundary_lines.append((-W[0]/W[1], -b/W[1]))
    return boundary_lines
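
# A quick usage sketch (not part of the original exercise): the toy data, the
# names X_toy/y_toy, and the epoch count below are made up purely to show how
# trainPerceptronAlgorithm is called and what it returns.
X_toy = np.array([[0.1, 0.2],
                  [0.3, 0.1],
                  [0.8, 0.9],
                  [0.7, 0.8]])
y_toy = np.array([0, 0, 1, 1])
lines = trainPerceptronAlgorithm(X_toy, y_toy, learn_rate=0.01, num_epochs=25)
slope, intercept = lines[-1]
print("final boundary: x2 = " + str(slope[0]) + "*x1 + " + str(intercept[0]))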
//////////////
# Earlier scratch attempt at the loop body, kept for reference:
label = y[c]
#calc
score = W1[0]*X1 + W2[0]*X2 + b
#print("Score: " + str(score))
#check if label is correct
if score >= 0 and label == 0:
    #update_W[c] = [W1 - X1*learn_rate, W2 - X2*learn_rate]
    #update_b.append(b - learn_rate)
    print("out")
elif score < 0 and label == 1:
    #update_W[c] = [W1 + X1*learn_rate, W2 + X2*learn_rate]
    #update_b.append(b + learn_rate)
    print("in")
else:
    update_W.append([W1[0], W2[0]])
    update_b.append(b)
c += 1

#check if label is correct
if score >= 0 and label == 0:
    print("subtract")
elif score < 0 and label == 1:
    print("add")
else:
    print("all good")