Tensorflow
Single layer perceptron code example
설화님
2023. 12. 31. 15:11
Single Layer Perceptron¶
[Example 1] 2-Input Logic Gate (Logistic Regression)¶
Load modules¶
In [ ]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
print("NumPy Version :{}".format(np.__version__))
print("TensorFlow Version :{}".format(tf.__version__))
print("Matplotlib Version :{}".format(plt.matplotlib.__version__))
WARNING:tensorflow:From c:\python\Lib\site-packages\keras\src\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.
NumPy Version :1.24.3
TensorFlow Version :2.15.0
Matplotlib Version :3.7.1
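The deprecation warning comes from Keras internals and does not affect this example. If you want to quiet TensorFlow's Python-side warnings, raising the logger level is one option; a minimal sketch, assuming TF 2.x:

# Optional: show only errors from TensorFlow's Python-side logger
tf.get_logger().setLevel('ERROR')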
Input and Label¶
In [ ]:
# Logistic regression: logic-gate truth table
x_input = tf.constant([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=tf.float32)
labels = tf.constant([[0], [0], [0], [1]], dtype=tf.float32)    # Gate : AND
# labels = tf.constant([[0], [1], [1], [1]], dtype=tf.float32)  # Gate : OR
# labels = tf.constant([[1], [1], [1], [0]], dtype=tf.float32)  # Gate : NAND
# labels = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)  # Gate : NOR
# labels = tf.constant([[0], [1], [1], [0]], dtype=tf.float32)  # Gate : XOR

# Weight and bias, initialized from a standard normal distribution
W = tf.Variable(tf.random.normal((2, 1), dtype=tf.float32))
B = tf.Variable(tf.random.normal((1,), dtype=tf.float32))
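Since W and B start from random normal draws, the exact numbers printed below will differ from run to run. If reproducibility matters, seeding TensorFlow's global generator before creating the variables works; a minimal sketch (the seed value 42 is an arbitrary choice):

# Optional: fix the seed so W and B get the same initial values every run
tf.random.set_seed(42)
W = tf.Variable(tf.random.normal((2, 1), dtype=tf.float32))
B = tf.Variable(tf.random.normal((1,), dtype=tf.float32))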
Hypothesis¶
In [ ]:
# Hypothesis and cost
def Hypothesis(x):
    return tf.sigmoid(tf.matmul(x, W) + B)

eps = 1e-7  # prevents log(0) => -infinity

def Cost():
    H = Hypothesis(x_input)
    return -tf.reduce_mean(labels * tf.math.log(H + eps)
                           + (1 - labels) * tf.math.log(1 - H + eps))
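This hand-written Cost() is the binary cross-entropy, -mean(y·log(H) + (1-y)·log(1-H)), with eps guarding the log. Keras ships the same loss built in, so a quick sanity check against it is possible; a minimal sketch, assuming the cell above has run (BinaryCrossentropy applies its own internal clipping, so the two values agree closely rather than exactly):

# Sanity check: compare the hand-written cost with Keras's built-in version
bce = tf.keras.losses.BinaryCrossentropy()
print("hand-written :", float(Cost()))
print("keras builtin:", float(bce(labels, Hypothesis(x_input))))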
Training¶
In [ ]:
%%time
# Training
epochs = 5000
learning_rate = 0.1
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

training_idx = np.arange(0, epochs + 1, 1)
cost_graph = np.zeros(epochs + 1)

# Epochs at which to snapshot W and B for the decision-boundary plots below
check = np.array([0, epochs*0.02, epochs*0.04, epochs*0.4, epochs*0.8, epochs], dtype=int)
w_trained = []
b_trained = []
check_idx = 0

for cnt in range(0, epochs + 1):
    cost_graph[cnt] = Cost()
    if cnt % (epochs // 20) == 0:
        print("[{:>5}] cost = {:>10.4}, w = [[{:>7.4}] [{:>7.4}]], b = [{:>7.4}]".format(
            cnt, cost_graph[cnt], W[0, 0], W[1, 0], B[0]))
    if check[check_idx] == cnt:
        w_trained.append(W.numpy())
        b_trained.append(B.numpy())
        check_idx += 1
    optimizer.minimize(Cost, [W, B])
[    0] cost =     0.6963, w = [[-0.07675] [-0.6038]], b = [-0.1499]
[  250] cost =     0.3398, w = [[  1.235] [    1.1]], b = [ -2.115]
[  500] cost =     0.2328, w = [[  2.008] [  1.966]], b = [ -3.251]
[  750] cost =     0.1787, w = [[  2.569] [  2.554]], b = [ -4.078]
[ 1000] cost =     0.1452, w = [[  3.015] [  3.008]], b = [ -4.735]
[ 1250] cost =     0.1221, w = [[  3.385] [  3.382]], b = [ -5.282]
[ 1500] cost =     0.1052, w = [[  3.701] [    3.7]], b = [ -5.751]
[ 1750] cost =    0.09235, w = [[  3.978] [  3.977]], b = [ -6.162]
[ 2000] cost =    0.08221, w = [[  4.223] [  4.222]], b = [ -6.526]
[ 2250] cost =    0.07402, w = [[  4.443] [  4.443]], b = [ -6.854]
[ 2500] cost =    0.06727, w = [[  4.643] [  4.643]], b = [ -7.152]
[ 2750] cost =    0.06162, w = [[  4.826] [  4.826]], b = [ -7.424]
[ 3000] cost =    0.05682, w = [[  4.994] [  4.994]], b = [ -7.675]
[ 3250] cost =     0.0527, w = [[  5.151] [  5.151]], b = [ -7.908]
[ 3500] cost =    0.04913, w = [[  5.296] [  5.296]], b = [ -8.125]
[ 3750] cost =    0.04599, w = [[  5.432] [  5.432]], b = [ -8.329]
[ 4000] cost =    0.04323, w = [[   5.56] [   5.56]], b = [  -8.52]
[ 4250] cost =    0.04077, w = [[   5.68] [   5.68]], b = [   -8.7]
[ 4500] cost =    0.03857, w = [[  5.794] [  5.794]], b = [  -8.87]
[ 4750] cost =    0.03659, w = [[  5.902] [  5.902]], b = [ -9.031]
[ 5000] cost =     0.0348, w = [[  6.005] [  6.005]], b = [ -9.185]
CPU times: total: 32.4 s
Wall time: 32.4 s
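Inside the loop, optimizer.minimize(Cost, [W, B]) re-evaluates Cost under a gradient tape and applies one SGD update to W and B. The same step can be spelled out explicitly with tf.GradientTape; a minimal equivalent sketch, assuming the variables above are in scope:

# One explicit SGD step, doing what optimizer.minimize(Cost, [W, B]) does
with tf.GradientTape() as tape:
    loss = Cost()
dW, dB = tape.gradient(loss, [W, B])
W.assign_sub(learning_rate * dW)  # W <- W - lr * dCost/dW
B.assign_sub(learning_rate * dB)  # B <- B - lr * dCost/dB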
Training Test¶
In [ ]:
# Evaluate the trained model on the four truth-table inputs
print("[Training Test]")
H_x = Hypothesis(x_input).numpy()
H_x = H_x.reshape((-1,))
H = [int(h > 0.5) for h in H_x]  # threshold at 0.5 to get a 0/1 prediction
for idx in range(x_input.shape[0]):
    print("Input {} , Label : {} => H :{:>2}(H_x:{:>5.2})".format(
        x_input[idx], labels[idx], H[idx], H_x[idx]))
[Training Test]
Input [0. 0.] , Label : [0.] => H : 0(H_x:0.0001)
Input [0. 1.] , Label : [0.] => H : 0(H_x: 0.04)
Input [1. 0.] , Label : [0.] => H : 0(H_x: 0.04)
Input [1. 1.] , Label : [1.] => H : 1(H_x: 0.94)
Plotting : Cost/Training Count¶
In [ ]:
# Plot the cost recorded at each training epoch (log scale on the y-axis)
plt.title("'Cost / Epochs' Graph")
plt.xlabel("Epochs")
plt.ylabel("Cost")
plt.plot(training_idx, cost_graph)
plt.xlim(0, epochs)
plt.grid(True)
plt.semilogy()
plt.show()
Decision boundary¶
In [ ]:
# Draw the decision boundary at each snapshot epoch.
# The boundary is where sigmoid(w0*x0 + w1*x1 + b) = 0.5,
# i.e. w0*x0 + w1*x1 + b = 0, so x1 = -(w0*x0 + b) / w1.
x_decision = np.linspace(-0.2, 1.2, 1000)
fig, ax = plt.subplots(2, 3, figsize=(15, 11))
fig.suptitle("'Hypothesis / Epochs' Graph")
for ax_idx in range(check.size):
    w = w_trained[ax_idx]  # local names so the trained W, B are not overwritten
    b = b_trained[ax_idx]
    y_decision = -(w[0][0] * x_decision + b[0]) / w[1][0]
    axis = ax[ax_idx // 3][ax_idx % 3]
    # Mark each input blue (label 0) or red (label 1)
    for i in range(labels.shape[0]):
        if labels[i][0] == 0:
            axis.scatter(x_input[i][0], x_input[i][1], color='blue', marker="x")
        else:
            axis.scatter(x_input[i][0], x_input[i][1], color='red', marker="o")
    axis.plot(x_decision, y_decision, color='green')
    axis.set_title("Epochs : {}".format(check[ax_idx]))
    axis.set_xlim((-0.2, 1.2))
    axis.set_ylim((-0.2, 1.2))
    axis.set_xlabel("x0")
    axis.set_ylabel("x1")
    axis.grid(True)
plt.show()
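Of the gates listed in the commented-out labels, AND, OR, NAND, and NOR are all linearly separable and train cleanly, but XOR is not: no single green line can split its red and blue points, which is exactly the limitation that motivates the multi-layer perceptron. A minimal sketch of a two-layer model that can learn XOR (the hidden size, optimizer, and epoch count are arbitrary choices, not part of the original example; results vary with initialization):

# A small MLP that learns XOR, which the single-layer model above cannot
xor_labels = tf.constant([[0], [1], [1], [0]], dtype=tf.float32)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='sigmoid', input_shape=(2,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.1),
              loss='binary_crossentropy')
model.fit(x_input, xor_labels, epochs=500, verbose=0)
print(model.predict(x_input, verbose=0))  # expect values near [0, 1, 1, 0]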