[TensorFlow] 01. MNIST
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress TensorFlow C++ log messages; set before importing tensorflow so it takes effect
import tensorflow as tf
mnist = tf.keras.datasets.mnist
# Load MNIST as four splits: training/test inputs and labels
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('Training input data shape : ', x_train.shape)
print('Training output data shape : ', y_train.shape)
print('Test input data shape : ', x_test.shape)
print('Test output data shape : ', y_test.shape)
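The shapes confirm 60,000 training images and 10,000 test images of 28x28 pixels, with one integer label (0-9) per image. As a quick sanity check (not in the original code, and assuming NumPy is available), the label distribution can be inspected like this:
import numpy as np
digits, counts = np.unique(y_train, return_counts=True)  # number of training samples per digit
for d, c in zip(digits, counts):
    print(f'digit {d}: {c} samples')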
# Display the raw image data
# using the matplotlib library
import matplotlib.pyplot as plt
plt.imshow(x_train[58888], cmap='gray')  # show the 58888th training handwriting image (imshow = image show, cmap = colormap)
plt.show()
print('Input of the 58888th training sample:', x_train[58888])
print('Label of the 58888th training sample:', y_train[58888])
# Scale the image data to [0, 1]
x_train = x_train / 255.0
x_test = x_test / 255.0
plt.imshow(x_train[58888], cmap='gray')
plt.show()
print('Input of the 58888th training sample:', x_train[58888])
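Dividing by 255.0 maps the original 0..255 pixel intensities into the [0, 1] range, which keeps the inputs on a scale the network trains well on. A minimal sketch (not in the original code) to confirm the scaling:
print('min pixel value:', x_train.min())   # expected 0.0 after scaling
print('max pixel value:', x_train.max())   # expected 1.0 after scaling
print('dtype:', x_train.dtype)             # floating point after the division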
# Build the neural network
model = tf.keras.models.Sequential()
layers = tf.keras.layers
model.add(layers.Flatten(input_shape=(28, 28)))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(10, activation='softmax'))
# Neural network summary
model.summary()
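The parameter counts in the summary follow directly from the layer sizes: each Dense layer has (inputs x units) weights plus one bias per unit. A small sketch (my own addition) reproducing the numbers reported in the summary output:
# Flatten: 28 * 28 = 784 inputs, no trainable parameters
dense_params = 784 * 128 + 128     # weights + biases = 100,480
output_params = 128 * 10 + 10      # weights + biases = 1,290
print(dense_params, output_params, dense_params + output_params)  # 100480 1290 101770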
# Configure the training setup (optimizer, loss, metric)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
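sparse_categorical_crossentropy is used because the labels are plain integers (0-9). If the labels were one-hot encoded instead, categorical_crossentropy would be the matching loss; a sketch of that alternative (not used in this post):
y_train_onehot = tf.keras.utils.to_categorical(y_train, num_classes=10)  # e.g. label 8 -> [0,0,0,0,0,0,0,0,1,0]
# with one-hot labels the compile call would use loss='categorical_crossentropy'
# and fit would receive y_train_onehot instead of y_train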
# Train the neural network
model.fit(x_train, y_train, epochs=5)
# Evaluate the neural network
model.evaluate(x_test, y_test)
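evaluate() returns the test loss and the metrics configured in compile(); a small sketch (my own addition) that captures them instead of only showing the progress bar:
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f'test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}')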
# Predict with the neural network
pick = x_test[9999].reshape(1, 28, 28)
pred = model.predict(pick)
answer = tf.argmax(pred, axis=1)
print('\nNetwork prediction (raw):', pred)
print('Network prediction (decoded):', answer)
print('Ground truth:', y_test[9999])
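The raw output is one softmax probability per digit class, and tf.argmax picks the most probable class. The same decoding works for a whole batch at once; a short sketch (my own addition) predicting the first five test images:
batch = x_test[:5]                       # shape (5, 28, 28), already scaled to [0, 1]
batch_pred = model.predict(batch)        # shape (5, 10): one probability row per image
batch_answer = tf.argmax(batch_pred, axis=1)
print('predicted:', batch_answer.numpy())
print('ground truth:', y_test[:5])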
# P2
# The Sequential model passes data from input to output one layer at a time (left to right)
model = tf.keras.models.Sequential()
# the tf.keras.layers package provides the various layer types
layers = tf.keras.layers
# Flatten converts the 2-D pixel grid of one handwritten image into a 1-D vector
model.add(layers.Flatten(input_shape=(28, 28)))
# Add a hidden Dense (fully connected) layer: every neuron is connected to every input, with ReLU activation
model.add(layers.Dense(128, activation="relu"))
# Dropout randomly zeroes 20% of activations during training to reduce overfitting
model.add(layers.Dropout(0.2))
# Output layer: 10 classes (digits 0-9) with softmax probabilities
model.add(layers.Dense(10, activation='softmax'))
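For reference, the same architecture can also be declared by passing the layer list to Sequential in a single call; this is an equivalent alternative (my own addition), not what the post runs:
alt_model = tf.keras.models.Sequential([
    layers.Flatten(input_shape=(28, 28)),     # 28x28 image -> 784-dim vector
    layers.Dense(128, activation='relu'),     # fully connected hidden layer
    layers.Dropout(0.2),                      # drop 20% of activations during training
    layers.Dense(10, activation='softmax'),   # class probabilities for digits 0-9
])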
# Neural network summary
model.summary()
# Configure the training setup (optimizer, loss, metric)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train the neural network
model.fit(x_train, y_train, epochs=5, verbose=1)
# Evaluate the neural network
model.evaluate(x_test, y_test, verbose=1)
# Predict with the neural network
pick = x_test[7777].reshape(1, 28, 28)
pred = model.predict(pick)
answer = tf.argmax(pred, axis=1)
print('\nNetwork prediction (raw):', pred)
print('Network prediction (decoded):', answer)
print('Ground truth:', y_test[7777])
Training input data shape :  (60000, 28, 28)
Training output data shape :  (60000,)
Test input data shape :  (10000, 28, 28)
Test output data shape :  (10000,)
Input of the 58888th training sample: [[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 36 43 123 148 218 253 237 148
51 14 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 43 155 242 253 252 252 252 252 253 252
252 120 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 8 226 252 252 250 231 134 126 38 38 47
29 95 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 11 171 252 236 145 70 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 43 252 252 101 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 159 250 227 104 18 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 197 252 252 199 85 7 0 0 0 0
43 85 128 191 190 190 99 0 0 0]
[ 0 0 0 0 0 0 0 0 19 196 249 253 252 225 57 13 31 162
242 252 244 214 126 126 51 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 115 190 237 252 252 217 218 252
247 162 49 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 32 147 252 252 253 252
55 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 145 253 253 255 253
191 14 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 100 247 252 217 81 231
252 120 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 39 247 252 141 12 0 137
252 189 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 114 252 212 28 0 0 64
247 231 28 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 45 236 252 38 0 0 0 0
232 252 129 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 62 254 253 21 0 0 0 15
237 253 147 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 27 218 252 21 0 0 0 121
252 252 112 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 139 252 181 127 127 233 247
252 141 4 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 32 210 252 252 252 253 252
185 28 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 14 147 252 252 191 112
4 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]]
Label of the 58888th training sample: 8
Input of the 58888th training sample: [[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0.01176471 0.07058824 0.07058824 0.07058824 0.49411765 0.53333333
0.68627451 0.10196078 0.65098039 1. 0.96862745 0.49803922
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0.11764706 0.14117647 0.36862745 0.60392157
0.66666667 0.99215686 0.99215686 0.99215686 0.99215686 0.99215686
0.88235294 0.6745098 0.99215686 0.94901961 0.76470588 0.25098039
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0.19215686 0.93333333 0.99215686 0.99215686 0.99215686
0.99215686 0.99215686 0.99215686 0.99215686 0.99215686 0.98431373
0.36470588 0.32156863 0.32156863 0.21960784 0.15294118 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0.07058824 0.85882353 0.99215686 0.99215686 0.99215686
0.99215686 0.99215686 0.77647059 0.71372549 0.96862745 0.94509804
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0.31372549 0.61176471 0.41960784 0.99215686
0.99215686 0.80392157 0.04313725 0. 0.16862745 0.60392157
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0.05490196 0.00392157 0.60392157
0.99215686 0.35294118 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.54509804
0.99215686 0.74509804 0.00784314 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.04313725
0.74509804 0.99215686 0.2745098 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0.1372549 0.94509804 0.88235294 0.62745098 0.42352941 0.00392157
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0.31764706 0.94117647 0.99215686 0.99215686 0.46666667
0.09803922 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0.17647059 0.72941176 0.99215686 0.99215686
0.58823529 0.10588235 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0.0627451 0.36470588 0.98823529
0.99215686 0.73333333 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.97647059
0.99215686 0.97647059 0.25098039 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0.18039216 0.50980392 0.71764706 0.99215686
0.99215686 0.81176471 0.00784314 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0.15294118 0.58039216 0.89803922 0.99215686 0.99215686 0.99215686
0.98039216 0.71372549 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0.09411765 0.44705882
0.86666667 0.99215686 0.99215686 0.99215686 0.99215686 0.78823529
0.30588235 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0.09019608 0.25882353 0.83529412 0.99215686
0.99215686 0.99215686 0.99215686 0.77647059 0.31764706 0.00784314
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0.07058824 0.67058824 0.85882353 0.99215686 0.99215686 0.99215686
0.99215686 0.76470588 0.31372549 0.03529412 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0.21568627 0.6745098
0.88627451 0.99215686 0.99215686 0.99215686 0.99215686 0.95686275
0.52156863 0.04313725 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0.53333333 0.99215686
0.99215686 0.99215686 0.83137255 0.52941176 0.51764706 0.0627451
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]
[0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. 0. 0. 0. ]]
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten (Flatten) (None, 784) 0
_________________________________________________________________
dense (Dense) (None, 128) 100480
_________________________________________________________________
dropout (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 101,770
Trainable params: 101,770
Non-trainable params: 0
_________________________________________________________________
Epoch 1/5
1875/1875 [==============================] - 1s 531us/step - loss: 0.2921 - accuracy: 0.9150
Epoch 2/5
1875/1875 [==============================] - 1s 525us/step - loss: 0.1413 - accuracy: 0.9581
Epoch 3/5
1875/1875 [==============================] - 1s 533us/step - loss: 0.1088 - accuracy: 0.9671
Epoch 4/5
1875/1875 [==============================] - 1s 534us/step - loss: 0.0897 - accuracy: 0.9722
Epoch 5/5
1875/1875 [==============================] - 1s 529us/step - loss: 0.0758 - accuracy: 0.9764
313/313 [==============================] - 0s 368us/step - loss: 0.0704 - accuracy: 0.9773
Network prediction (raw): [[5.9696924e-07 3.3972197e-11 9.7319365e-08 4.1820209e-07 1.3347925e-06
 1.5079459e-05 9.9998248e-01 3.3892089e-10 2.5406795e-08 3.8196862e-10]]
Network prediction (decoded): tf.Tensor([6], shape=(1,), dtype=int64)
Ground truth: 6
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_1 (Flatten) (None, 784) 0
_________________________________________________________________
dense_2 (Dense) (None, 128) 100480
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_3 (Dense) (None, 10) 1290
=================================================================
Total params: 101,770
Trainable params: 101,770
Non-trainable params: 0
_________________________________________________________________
Epoch 1/5
1875/1875 [==============================] - 1s 520us/step - loss: 0.2963 - accuracy: 0.9150
Epoch 2/5
1875/1875 [==============================] - 1s 519us/step - loss: 0.1422 - accuracy: 0.9575
Epoch 3/5
1875/1875 [==============================] - 1s 521us/step - loss: 0.1044 - accuracy: 0.9685
Epoch 4/5
1875/1875 [==============================] - 1s 514us/step - loss: 0.0848 - accuracy: 0.9734
Epoch 5/5
1875/1875 [==============================] - 1s 512us/step - loss: 0.0728 - accuracy: 0.9768
313/313 [==============================] - 0s 355us/step - loss: 0.0718 - accuracy: 0.9791
Network prediction (raw): [[5.4633732e-11 2.7291271e-08 1.0134214e-07 8.1191749e-05 2.9305791e-16
 9.9991751e-01 3.1186490e-10 5.4462762e-12 7.7860199e-07 3.9311931e-07]]
Network prediction (decoded): tf.Tensor([5], shape=(1,), dtype=int64)
Ground truth: 5