Tensorflow 딥러닝 CNN

Fashion-MNIST 데이터 딥러닝으로 분류하기

import os

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

## TensorFlow deep learning: MLP baseline on Fashion-MNIST.
# Data: 60k train / 10k test grayscale 28x28 images, 10 clothing classes.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Flatten each 28x28 image to a 784-vector and scale pixel values to [0, 1].
train_images = train_images.reshape(train_images.shape[0], 784).astype('float32') / 255
test_images = test_images.reshape(test_images.shape[0], 784).astype('float32') / 255

# One-hot encode the 10 class labels (required by categorical_crossentropy).
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)

# Model definition: fully-connected 784 -> 1024 -> 512 -> 10 network.
model = Sequential()
# Input 784, output 1024 (hidden layer).
model.add(Dense(1024, input_dim=784, activation='relu'))
# Input 1024 (inferred), output 512 (hidden layer).
model.add(Dense(512, activation='relu'))
# Input 512 (inferred), output 10 (softmax output layer).
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Callbacks: keep the best weights (by validation loss) and stop early after
# 10 epochs without improvement.
modelpath = "./MODEL_DIR/MNIST_MLP.hdf5"
# BUG FIX: ModelCheckpoint fails with an IO error when the target directory
# does not exist, so create it up front.
os.makedirs(os.path.dirname(modelpath), exist_ok=True)
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)

# Training: 25% of the training data is held out for validation.
history = model.fit(train_images, train_labels, validation_split=0.25, epochs=30,
                    batch_size=200, verbose=0,
                    callbacks=[early_stopping_callback, checkpointer])

# Evaluation on the untouched test set; evaluate() returns [loss, accuracy].
print("\n Test Accuracy: %.4f" % (model.evaluate(test_images, test_labels)[1]))  # ~0.8889

Fashion-MNIST 데이터 CNN으로 분류하기

import os

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

## TensorFlow CNN on Fashion-MNIST.
# Data: 60k train / 10k test grayscale 28x28 images, 10 clothing classes.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Reshape to (N, 28, 28, 1) — Conv2D expects a trailing channel axis — and
# scale pixel values to [0, 1].
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32') / 255
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32') / 255

# One-hot encode the 10 class labels (required by categorical_crossentropy).
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)

# Model definition: two conv layers, max-pooling, then a dense classifier.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))          # regularize the conv features
model.add(Flatten())              # 2-D feature maps -> 1-D vector for Dense
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))           # heavier dropout before the output layer
model.add(Dense(10, activation='softmax'))

# Compile and callback options: keep the best weights (by validation loss)
# and stop early after 10 epochs without improvement.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
modelpath = "./MODEL_DIR/MNIST_CNN.hdf5"
# BUG FIX: ModelCheckpoint fails with an IO error when the target directory
# does not exist, so create it up front.
os.makedirs(os.path.dirname(modelpath), exist_ok=True)
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)

# Training: 25% of the training data is held out for validation.
history = model.fit(train_images, train_labels, validation_split=0.25, epochs=30,
                    batch_size=200, verbose=0,
                    callbacks=[early_stopping_callback, checkpointer])

# Evaluation on the untouched test set; evaluate() returns [loss, accuracy].
print("\n Test Accuracy: %.4f" % (model.evaluate(test_images, test_labels)[1]))
# Test Accuracy: 0.9237
Author

InhwanCho

Posted on

2022-12-12

Updated on

2022-12-12

Licensed under

Comments