인공지능/CNN

이미지 사이즈 조절 및 이미지 분석 (feat. VGG)

쿠와와 2020. 12. 4. 17:49

주석을 봐도 모르겠으면 알려주세요

# Day_23_01_17flowers.py
# 17flowers_origin
import tensorflow as tf
import os
import glob
from PIL import Image
from sklearn import preprocessing, model_selection
import numpy as np


# 이미지를 224 * 224 로 바꿔야함
# 17flowers_origin -> 224로 줄여서
def resize_17flowers(src_folder, dst_folder, new_size):
    """Resize every .jpg in src_folder to new_size x new_size and save to dst_folder.

    Note: forcing a square output ignores the original aspect ratio,
    so non-square images are distorted (this matches the original intent
    of producing fixed-size CNN inputs such as 224x224).
    """
    # Create the destination folder if it does not exist yet
    # (makedirs also creates missing parent directories).
    os.makedirs(dst_folder, exist_ok=True)

    for path in glob.glob(os.path.join(src_folder, '*.jpg')):
        img = Image.open(path)
        # Both sides are forced to new_size; aspect ratio is not preserved.
        resized = img.resize((new_size, new_size))
        # os.path.basename is portable; the original split('\\') only
        # worked on Windows paths and assumed exactly one separator.
        resized.save(os.path.join(dst_folder, os.path.basename(path)))


def get_xy(data_folder):
    """Load the resized 17-flowers images and derive labels from file names.

    File names look like 'image_0001.jpg' .. 'image_1360.jpg'; the dataset
    has 17 classes of 80 consecutive images each, so the label is
    (index - 1) // 80, giving class ids 0..16.

    Returns:
        x: float32 array of shape (N, H, W, C) with raw pixel values 0..255.
        y: int32 array of shape (N,) with class ids 0..16.
    """
    x, y = [], []
    # sorted() makes the sample order deterministic; os.listdir order
    # is unspecified and varies across operating systems / filesystems.
    for filename in sorted(os.listdir(data_folder)):
        # Skip hidden files such as .DS_Store.
        if filename.startswith('.'):
            continue

        # 'image_0001.jpg' -> '0001' (last 4 chars before the extension).
        idx = filename.split('.')[0][-4:]

        # Images 1~80 -> class 0, 81~160 -> class 1, ..., 1281~1360 -> class 16.
        y.append((int(idx) - 1) // 80)

        img = Image.open(os.path.join(data_folder, filename))
        x.append(np.array(img))

    return np.float32(x), np.int32(y)


def model_17flowers_dense():
    """Train a small CNN with a Dense classifier head on the 224x224 dataset.

    Loads images from '17flowers_224', scales pixels to [0, 1], holds out
    20% for evaluation, and trains 3 conv/pool stages followed by a
    Flatten + Dense head ending in a 17-way softmax.
    """
    x, y = get_xy('17flowers_224')
    # print(x.shape, y.shape)
    # print(y[:5], y[-5:])

    # Raw pixel range is 0.0 .. 255.0; scale to [0, 1].
    x = x / 255

    # 80% for training, 20% for evaluation.
    x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, train_size=0.8)

    model = tf.keras.Sequential()
    # Fixed input shape taken from the loaded data (H, W, C).
    model.add(tf.keras.layers.Input(shape=[x.shape[1], x.shape[2], x.shape[3]]))

    # Conv stage 1: 16 filters, 3x3, stride 1, 'same' padding; 2x2 max-pool halves H and W.
    model.add(tf.keras.layers.Conv2D(16, [3, 3], 1, 'same', activation='relu'))
    model.add(tf.keras.layers.MaxPool2D([2, 2], 2, 'same'))

    # Conv stage 2: 32 filters.
    model.add(tf.keras.layers.Conv2D(32, [3, 3], 1, 'same', activation='relu'))
    model.add(tf.keras.layers.MaxPool2D([2, 2], 2, 'same'))

    # Conv stage 3: 64 filters.
    model.add(tf.keras.layers.Conv2D(64, [3, 3], 1, 'same', activation='relu'))
    model.add(tf.keras.layers.MaxPool2D([2, 2], 2, 'same'))

    model.add(tf.keras.layers.Flatten())

    # Dense classifier head; 17 output classes.
    model.add(tf.keras.layers.Dense(1024, activation='relu'))
    model.add(tf.keras.layers.Dense(256, activation='relu'))
    model.add(tf.keras.layers.Dense(17, activation='softmax'))

    model.summary()

    # 'learning_rate' is the supported keyword; the old 'lr' alias is
    # deprecated and removed in newer Keras releases.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss=tf.keras.losses.sparse_categorical_crossentropy,
                  metrics=['acc'])

    model.fit(x_train, y_train, epochs=10, batch_size=100, verbose=2)
    print('acc : ', model.evaluate(x_test, y_test, verbose=0))


def model_17flowers_conv2d():
    """Train a fully-convolutional CNN (Dense head replaced by 1x1 convs).

    Loads 56x56 images so that three 2x2 pools leave a 7x7 feature map,
    which a 7x7 'valid' convolution collapses to 1x1. The Dense layers of
    the baseline model are replaced by 1x1 convolutions, followed by a
    Reshape + Softmax to produce the 17-way class probabilities.
    """
    # x, y = get_xy('17flowers_224')
    x, y = get_xy('17flowers_56')
    # print(x.shape, y.shape)
    # print(y[:5], y[-5:])

    # Raw pixel range is 0.0 .. 255.0; scale to [0, 1].
    x = x / 255

    # 80% for training, 20% for evaluation.
    x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, train_size=0.8)

    model = tf.keras.Sequential()
    # Fixed input shape taken from the loaded data (H, W, C).
    model.add(tf.keras.layers.Input(shape=[x.shape[1], x.shape[2], x.shape[3]]))

    # Conv stage 1: 56x56 -> 28x28 after pooling.
    model.add(tf.keras.layers.Conv2D(16, [3, 3], 1, 'same', activation='relu'))
    model.add(tf.keras.layers.MaxPool2D([2, 2], 2, 'same'))

    # Conv stage 2: 28x28 -> 14x14.
    model.add(tf.keras.layers.Conv2D(32, [3, 3], 1, 'same', activation='relu'))
    model.add(tf.keras.layers.MaxPool2D([2, 2], 2, 'same'))

    # Conv stage 3: 14x14 -> 7x7.
    model.add(tf.keras.layers.Conv2D(64, [3, 3], 1, 'same', activation='relu'))
    model.add(tf.keras.layers.MaxPool2D([2, 2], 2, 'same'))

    # 7x7 'valid' conv acts like a Dense(1024) over the whole 7x7 map -> 1x1x1024.
    model.add(tf.keras.layers.Conv2D(1024, [7, 7], 1, 'valid', activation='relu'))
    # 1x1 convs stand in for the remaining Dense layers.
    model.add(tf.keras.layers.Conv2D(256, [1, 1], 1, activation='relu'))
    model.add(tf.keras.layers.Conv2D(17, [1, 1], 1, activation=None))

    # Collapse (1, 1, 17) to (17,) so Softmax produces a flat class
    # distribution; Reshape([-1]) would work as well.
    model.add(tf.keras.layers.Reshape([17]))
    model.add(tf.keras.layers.Softmax())

    model.summary()

    # 'learning_rate' is the supported keyword; the old 'lr' alias is
    # deprecated and removed in newer Keras releases.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss=tf.keras.losses.sparse_categorical_crossentropy,
                  metrics=['acc'])

    model.fit(x_train, y_train, epochs=10, batch_size=100, verbose=2)
    print('acc : ', model.evaluate(x_test, y_test, verbose=0))


# One-off preprocessing: resize the originals into the fixed-size folders
# the models load from (run once, then keep commented out).
# resize_17flowers('17flowers_origin', '17flowers_224', new_size=224)
# resize_17flowers('17flowers_origin', '17flowers_56', new_size=56)
# resize_17flowers('17flowers_origin', '17flowers_112', new_size=112)

# Dense-head baseline (224x224 input):
# model_17flowers_dense()
# Fully-convolutional variant (56x56 input) — currently active:
model_17flowers_conv2d()

'인공지능 > CNN' 카테고리의 다른 글

이미지 증식과 분석, 사전학습된 것 적용시키기 (전이 학습)  (0) 2020.12.09
이미지 증식  (0) 2020.12.09
VGG  (0) 2020.12.04
LeNet5  (0) 2020.12.04
#2 (#1에서 했던) CNN 코드 Keras 사용해보기  (0) 2020.12.01