Softmax Classification (TensorFlow) code example
설화님
2023. 12. 31. 14:09
Softmax Classification
[Example 13] Softmax Classification (TensorFlow)
Load modules
In [ ]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
print("NumPy Version :{}".format(np.__version__))
print("TensorFlow Version :{}".format(tf.__version__))
print("Matplotlib Version :{}".format(plt.matplotlib.__version__))
WARNING:tensorflow:From c:\python\Lib\site-packages\keras\src\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.
NumPy Version :1.24.3
TensorFlow Version :2.15.0
Matplotlib Version :3.7.1
Input and Label
In [ ]:
# 9 training points in 2-D, three per class; labels are one-hot encoded
x_input = tf.constant([[1, 1], [2, 2.5], [2.5, 1.3], [4.3, 9.5], [5.5, 7.0], [6, 8.2], [7, 5], [8, 6], [9, 4.5]], dtype=tf.float32)
labels = tf.constant([[1, 0, 0], [1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 0, 1]], dtype=tf.float32)

# 2 input features, 3 classes; weights and bias start from random normal values
n_var, n_class = 2, 3
W = tf.Variable(tf.random.normal((n_var, n_class), dtype=tf.float32))
B = tf.Variable(tf.random.normal((n_class,), dtype=tf.float32))
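The labels above are one-hot vectors written out by hand: row k has a 1 in the column of that sample's class. If the data started out as class indices (0, 1, 2) instead, tf.one_hot builds the same encoding; a minimal sketch, not part of the original code:
In [ ]:
# Sketch: the same one-hot labels built from class indices
class_idx = tf.constant([0, 0, 0, 1, 1, 1, 2, 2, 2])
labels_alt = tf.one_hot(class_idx, depth=n_class, dtype=tf.float32)
print(tf.reduce_all(labels_alt == labels).numpy())  # True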
In [ ]:
def logits(x):
    # Linear scores: x @ W + B, shape (batch, n_class)
    return tf.matmul(x, W) + B

def Hypothesis(x):
    # Softmax turns the scores into class probabilities
    return tf.nn.softmax(logits(x))
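Hypothesis applies the softmax, softmax(z)_k = exp(z_k) / sum_j exp(z_j), to the logits, so each output row is a probability distribution over the three classes. A quick sanity check (a sketch, not part of the original code; before training the values are random, but each row still sums to 1):
In [ ]:
# Sanity check: every softmax row sums to 1 (within floating-point error)
probs = Hypothesis(x_input)
print(probs.shape)                           # (9, 3)
print(tf.reduce_sum(probs, axis=1).numpy())  # ~[1. 1. ... 1.]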
In [ ]:
def Cost():
    # Mean softmax cross-entropy over the whole training set
    logit_value = logits(x_input)
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit_value, labels=labels))
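tf.nn.softmax_cross_entropy_with_logits fuses the softmax and the cross-entropy -sum(labels * log(softmax(logits))) into one numerically stable op, which is why Cost works on the raw logits rather than on Hypothesis. For illustration only (a sketch, not part of the original; the manual form is less stable), the two agree:
In [ ]:
# Illustration: manual cross-entropy matches the fused op
manual = -tf.reduce_sum(labels * tf.math.log(Hypothesis(x_input)), axis=1)
fused = tf.nn.softmax_cross_entropy_with_logits(logits=logits(x_input), labels=labels)
print(tf.reduce_mean(manual).numpy(), tf.reduce_mean(fused).numpy())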
Training
In [ ]:
%%time
epochs = 50000
learning_rate = 0.01
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
training_idx = np.arange(0, epochs+1, 1)
cost_graph = np.zeros(epochs+1)
# Training loop: record the current cost, then take one SGD step
for cnt in range(0, epochs+1):
    cost_graph[cnt] = Cost()
    if cnt % (epochs//20) == 0:
        print("[{:>5}] cost = {:>10.4}".format(cnt, cost_graph[cnt]))
    optimizer.minimize(Cost, [W, B])
[    0] cost =      8.451
[ 2500] cost =    0.07444
[ 5000] cost =    0.04577
[ 7500] cost =    0.03326
[10000] cost =    0.02618
[12500] cost =    0.02162
[15000] cost =    0.01842
[17500] cost =    0.01606
[20000] cost =    0.01424
[22500] cost =     0.0128
[25000] cost =    0.01162
[27500] cost =    0.01064
[30000] cost =   0.009823
[32500] cost =    0.00912
[35000] cost =   0.008512
[37500] cost =   0.007981
[40000] cost =   0.007513
[42500] cost =   0.007097
[45000] cost =   0.006726
[47500] cost =   0.006392
[50000] cost =    0.00609
CPU times: total: 5min 50s
Wall time: 5min 51s
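Running 50,000 eager-mode iterations is slow (about six minutes above). A common speedup, not used in the original post, is to compile one update step with tf.function so the computation graph is traced once; a sketch:
In [ ]:
# Sketch (not from the original): compile one update step with tf.function
@tf.function
def train_step():
    with tf.GradientTape() as tape:
        cost = Cost()                  # cost before the update, as in the loop above
    grads = tape.gradient(cost, [W, B])
    optimizer.apply_gradients(zip(grads, [W, B]))
    return cost

# The training loop would then read:
# for cnt in range(epochs + 1):
#     cost_graph[cnt] = train_step()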
In [ ]:
# Predicted class for every training point, then for four unseen test points
print(np.argmax(Hypothesis(x_input), axis=1))

x_test = tf.constant([[3.5, 1.0], [5.0, 8.0], [9.0, 7.0], [5.0, 5.0]], dtype=tf.float32)
H_x = Hypothesis(x_test)
for i in range(x_test.shape[0]):
    print("{} => Group {}: {}".format(x_test[i], np.argmax(H_x[i]), H_x[i]))
[0 0 0 1 1 1 2 2 2]
[3.5 1. ] => Group 0: [8.0793631e-01 4.0492523e-05 1.9202325e-01]
[5. 8.] => Group 1: [1.4236821e-04 9.9983788e-01 1.9779796e-05]
[9. 7.] => Group 2: [1.4673294e-04 1.0119613e-03 9.9884135e-01]
[5. 5.] => Group 1: [0.3243591 0.38448143 0.29115954]
Plotting
In [ ]:
# Plot the training progress:
# cost value at each training epoch
plt.title("'Cost / Epochs' Graph")
plt.xlabel("Epochs")
plt.ylabel("Cost")
plt.plot(training_idx, cost_graph)
plt.xlim(0, epochs)
plt.grid(True)
plt.semilogy()
plt.show()
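Because the model is linear in x, the learned decision regions can also be drawn directly. The sketch below is my own addition (the grid range and styling are arbitrary choices, not from the original post): it colors a grid of points by their predicted class and overlays the training data.
In [ ]:
# Sketch: color a grid of points by predicted class to show the decision regions
xx, yy = np.meshgrid(np.linspace(0, 10, 200), np.linspace(0, 10, 200))
grid = tf.constant(np.c_[xx.ravel(), yy.ravel()], dtype=tf.float32)
pred = np.argmax(Hypothesis(grid), axis=1).reshape(xx.shape)

plt.contourf(xx, yy, pred, alpha=0.3)
plt.scatter(x_input[:, 0], x_input[:, 1], c=np.argmax(labels, axis=1))
plt.title("Decision Regions")
plt.xlabel("x1")
plt.ylabel("x2")
plt.show()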