[쉬움주의] 텐서플로 1.x 버전에서 Keras 환경 구성 — conda create -n keras_study3
1. 학습시키는 코드
"""지금까지 제일 잘 분류하는 신경망 코드 """
from keras.models import *
from keras.layers import Dense, Flatten, Dropout, BatchNormalization, Conv2D, MaxPooling2D, Activation
from sklearn.model_selection import train_test_split
from keras.datasets.fashion_mnist import load_data
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import to_categorical
from keras.layers.normalization import BatchNormalization
import loader3
from PIL import Image
"""이미지 경로 설정 """
train_image = 'c:\\animal2\\train_resize2\\'
test_image = 'c:\\animal2\\test_resize2\\'
train_label = 'c:\\animal2\\train_label.csv'
test_label = 'c:\\animal2\\test_label.csv'
""" loader3 함수를 가상환경 밑에 넣고 함수 구현 """
x_train = loader3.image_load(train_image)
y_train = loader3.label_load(train_label)
x_test = loader3.image_load(test_image)
y_test = loader3.label_load(test_label)
print ( loader3.image_load(train_image).shape )
print ( loader3.image_load(test_image).shape ) # (400,)
print ( loader3.label_load(train_label).shape) # (1600, 2)
print ( loader3.label_load(test_label).shape )
"""4차원 데이터이므로 reshape 필요없음"""
## x_train, x_test 수치 정규화를 위한 작업
x_mean = np.mean(x_train, axis = (0, 1, 2))
x_std = np.std(x_train, axis = (0, 1, 2))
x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std
# 검증 데이터셋을 만듭니다.
from sklearn.model_selection import train_test_split
np.random.seed(111)
# 훈련/테스트 데이터를 0.7/0.3의 비율로 분리합니다.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
test_size = 0.2, random_state = 777)
## Data-augmentation generators: random flips, shear, shifts and rotation for
## the training stream; the validation stream is left un-augmented.
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(horizontal_flip = True,
                                   vertical_flip = True,
                                   shear_range = 0.5,
                                   width_shift_range = 0.1,
                                   height_shift_range = 0.1,
                                   rotation_range = 30,
                                   fill_mode = 'nearest')
val_datagen = ImageDataGenerator()
batch_size = 32
# Use the batch_size variable (the original hard-coded 32 here) so these
# generators stay consistent with the get_step() step counts used by fit().
train_generator = train_datagen.flow(x_train, y_train,
                                     batch_size = batch_size)
val_generator = val_datagen.flow(x_val, y_val,
                                 batch_size = batch_size)
# Transfer learning: a pre-trained VGG16 convolutional base plus a small
# dense classifier head for the 2-class (jaguar / zebra) problem.
from keras.models import *
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, BatchNormalization, Activation
from keras.optimizers import Adam
from keras.applications import VGG16
# Load the VGG16 feature extractor pre-trained on ImageNet, with the original
# classifier head removed (include_top=False) and a 32x32x3 input.
vgg16 = VGG16(weights = 'imagenet', input_shape = (32, 32, 3), include_top = False)
# Freeze everything except the last 4 layers so only the final convolutional
# block is fine-tuned on the new dataset.
for layer in vgg16.layers[:-4]:
    layer.trainable = False
# Classifier head: three ReLU dense layers narrowing 256 -> 128 -> 64,
# followed by a 2-way softmax output.
model = Sequential()
model.add(vgg16)
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(2, activation = 'softmax'))
from keras.callbacks import ModelCheckpoint, EarlyStopping
filepath = 'c:\\animal2\\re_w2.h5'
# Save only the weights that achieve the best validation loss so far.
callbacks = [ ModelCheckpoint( filepath = filepath , monitor = 'val_loss' , verbose=1, save_best_only=True ) ]
# The labels are one-hot over 2 classes (shape (N, 2)) and the output layer is
# a 2-unit softmax, so the matching loss is categorical_crossentropy.  The
# original used binary_crossentropy, which pairs with a single sigmoid unit
# and makes the 'acc' metric resolve to binary accuracy, skewing reported
# results for this head.
model.compile(optimizer = Adam(1e-4),
              loss = 'categorical_crossentropy',
              metrics = ['acc'])
batch_size=32
def get_step(train_len, batch_size):
    """Number of batches needed to cover train_len samples (ceiling division)."""
    full_batches, remainder = divmod(train_len, batch_size)
    return full_batches + 1 if remainder else full_batches
# Train for 30 epochs from the augmented generator; get_step() sizes the step
# counts so every sample is covered once per epoch (ceiling division).
history = model.fit(train_generator,
                    epochs = 30,
                    steps_per_epoch = get_step(len(x_train), batch_size),
                    validation_data = val_generator,
                    validation_steps = get_step(len(x_val), batch_size), callbacks=callbacks )
# Evaluate on the held-out (already standardized) test set: prints [loss, acc].
print(model.evaluate(x_test,y_test))
# Persist the final (last-epoch) model; the best-val_loss weights were already
# saved separately by the ModelCheckpoint callback to re_w2.h5.
save_model(model, 'c:\\animal2\\re_w3.h5')
2. R 스튜디오 코드 (홈페이지 구축용)
3. R 스튜디오 로컬용
setwd("D:\\yys267")
packages <- c('imager', "shiny", "jpeg", "png", "reticulate", "devtools")
if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
install.packages(setdiff(packages, rownames(installed.packages())))
}
if (length(setdiff("keras", rownames(installed.packages()))) > 0) {
devtools::install_github("rstudio/keras")
}
require(imager)
require(shiny)
require(jpeg)
require(png)
library(reticulate)
library(keras)
#setwd(tempfile())
#setwd("/Users/aiden/Desktop/data/cifar10_densenet")
load("envir.RData")
model<<-load_model_hdf5("final.h5")
synsets <<- readLines("synset.txt")
# Shiny server: classifies an image (uploaded file or downloaded from a URL)
# with the pre-loaded Keras model and renders the image plus predicted label.
server <- shinyServer(function(input, output) {
  # Download the URL to a temp file when Go! is pressed; returns NULL while
  # the text box still holds the "http://" placeholder.
  ntext <- eventReactive(input$goButton, {
    print(input$url)
    if (input$url == "http://") {
      NULL
    } else {
      tmp_file <- tempfile()
      download.file(input$url, destfile = tmp_file, mode = 'wb')
      tmp_file
    }
  })
  # Show the selected image.  Source priority depends on the active tab:
  # uploaded file first on the Upload tab, downloaded URL first otherwise;
  # 'jaguar.jpg' is the bundled default when neither is available.
  output$originImage = renderImage({
    list(src = if (input$tabs == "Upload Image") {
      if (is.null(input$file1)) {
        if (input$goButton == 0 || is.null(ntext())) {
          'jaguar.jpg'
        } else {
          ntext()
        }
      } else {
        input$file1$datapath
      }
    } else {
      if (input$goButton == 0 || is.null(ntext())) {
        if (is.null(input$file1)) {
          'jaguar.jpg'
        } else {
          input$file1$datapath
        }
      } else {
        ntext()
      }
    },
    title = "Original Image")
  }, deleteFile = FALSE)
  # Predict the class of the same image and return the label as text.
  output$res <- renderText({
    # NOTE(review): this source-selection block duplicates the logic in
    # renderImage above; consider factoring it into a shared reactive().
    src = if (input$tabs == "Upload Image") {
      if (is.null(input$file1)) {
        if (input$goButton == 0 || is.null(ntext())) {
          'jaguar.jpg'
        } else {
          ntext()
        }
      } else {
        input$file1$datapath
      }
    } else {
      if (input$goButton == 0 || is.null(ntext())) {
        if (is.null(input$file1)) {
          'jaguar.jpg'
        } else {
          input$file1$datapath
        }
      } else {
        ntext()
      }
    }
    img <- load.image(src)
    plot(img)
    # Resize to the model's 32x32 input and build a single-image batch tensor.
    img <- image_load(src, target_size = c(32,32))
    img
    x <- image_to_array(img)
    # ensure we have a 4d tensor with single element in the batch dimension,
    x <- array_reshape(x, c(1, dim(x)))
    # Scale channels to [0, 1].
    # NOTE(review): the Python training pipeline standardized inputs with
    # per-channel mean/std, not /255 — confirm the saved model (final.h5) was
    # trained with /255 scaling, otherwise predictions here receive
    # mismatched inputs.
    x[,,,1] <- x[,,,1] /255.0
    x[,,,2] <- x[,,,2] /255.0
    x[,,,3] <- x[,,,3] /255.0
    # predict class probabilities
    preds <- model %>% predict(x)
    # Index of the highest-probability class.
    order(preds[1,], decreasing = TRUE)
    max.idx <- order(preds[1,], decreasing = TRUE)[1]
    max.idx
    # synset.txt line format appears to be "<id> <label>"; keep only the
    # label token (tmp[2]).
    result <- synsets[max.idx]
    res_str <- ""
    tmp <- strsplit(result[1], " ")[[1]]
    res_str <- paste0(res_str, tmp[2])
    res_str
  })
})
# NOTE(review): these packages were already attached in the setup section
# above; the repeated require() calls are harmless but redundant (and
# require() is discouraged for loading — it returns FALSE instead of
# erroring on a missing package).
require(imager)
require(shiny)
require(jpeg)
require(png)
# Shiny UI: two input tabs (local file upload / image URL) on the left,
# and output panes showing the original image and the predicted class text.
ui <- shinyUI(
  fluidPage(
    includeCSS("bootstrap.css"),
    pageWithSidebar(
      # title is Korean for "jaguar / zebra photo classification network".
      headerPanel(title = '재규어 얼룩말 사진 판별 신경망',
                  windowTitle = 'Image Classification using DenseNet'),
      fluidRow(
        column(1),
        column(9,
          # Input tabs; input$tabs drives the source-selection logic in server.
          tabsetPanel(
            id = "tabs",
            tabPanel("Upload Image",
                     fileInput('file1', 'Upload a PNG / JPEG File:')),
            tabPanel(
              "Use the URL",
              textInput("url", "Image URL:", "http://"),
              actionButton("goButton", "Go!")
            )
          ),
          h3(titlePanel("DESCRIPTION - Image Classification")),
          h3(titlePanel("Image Classification"))
        ),
        column(2)
      ),
      # Output panes filled by output$originImage and output$res in server.
      mainPanel(
        h3("Image"),
        tags$hr(),
        imageOutput("originImage", height = "auto"),
        tags$hr(),
        h3("What is this?"),
        tags$hr(),
        verbatimTextOutput("res")
      )
    )))
shinyApp(ui = ui, server = server)
첫댓글 고생했다. 승희야 ~