# Libraries
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
import matplotlib.pyplot as plt                 # used below for the loss plots
from sklearn.metrics import mean_squared_error  # used below for train/validation MSE
# input layer: 18 features
# hidden layer: 4 units
# hidden layer: 3 units
# output layer: 1 unit (regression)
# Model creation, method 1
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(18,)))
model.add(Dense(3, activation='relu'))
model.add(Dense(1))
model.summary()
model.compile(optimizer='adam', loss='mse')
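For comparison (not part of the original notes), the same 18-4-3-1 regression network can also be built with the Keras Functional API; a minimal sketch, with model_fn as a hypothetical name:
from tensorflow.keras import Input, Model
inputs = Input(shape=(18,))
x = Dense(4, activation='relu')(inputs)
x = Dense(3, activation='relu')(x)
outputs = Dense(1)(x)                 # linear output for regression
model_fn = Model(inputs, outputs)     # equivalent to the Sequential model above
model_fn.compile(optimizer='adam', loss='mse')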
# Model creation, method 2 - Dropout to prevent overfitting
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(18,)))
model.add(Dropout(0.3))
model.add(Dense(3, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1))  # linear output for regression (sigmoid would squash predictions into [0, 1])
model.summary()
# Compile
model.compile(optimizer='adam', loss='mse', metrics=['mae'])  # accuracy is not meaningful for regression
# Training
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=16)
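Dropout is one way to limit overfitting; another common option (an assumption on my part, not in the original notes) is an EarlyStopping callback that halts training once the validation loss stops improving:
from tensorflow.keras.callbacks import EarlyStopping
# stop if val_loss has not improved for 5 epochs and keep the best weights
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                    epochs=20, batch_size=16, callbacks=[early_stop])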
# Visualize training and validation loss (MSE)
plt.plot(history.history['loss'], label='Train MSE')
plt.plot(history.history['val_loss'], label='Validation MSE')
plt.xlabel('Epoch')
plt.ylabel('Mean Squared Error')
plt.legend()
plt.show()
# Predict on the training and validation data
y_pred_train = model.predict(X_train)
y_pred_val = model.predict(X_test)
# Training MSE
print(mean_squared_error(y_train, y_pred_train))
# Validation MSE
print(mean_squared_error(y_test, y_pred_val))
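Beyond MSE, regression results are often reported as RMSE and MAE as well; a minimal sketch using sklearn and numpy, assuming the same X_test/y_test split as above:
import numpy as np
from sklearn.metrics import mean_absolute_error
rmse = np.sqrt(mean_squared_error(y_test, y_pred_val))  # error in the target's own units
mae = mean_absolute_error(y_test, y_pred_val)
print(rmse, mae)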
B. DNN layers for binary classification
# Libraries
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
import pandas as pd              # used below for the history DataFrame
import matplotlib.pyplot as plt
# input layer: 18 features
# hidden layer: 4 units
# hidden layer: 3 units
# output layer: 1 unit (binary classification)
# Model creation, method 1
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(18,)))
model.add(Dense(3, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Model creation, method 2 - Dropout to prevent overfitting
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(18,)))
model.add(Dropout(0.3))
model.add(Dense(3, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Training
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=16)
# Performance visualization
losses = pd.DataFrame(history.history)
losses.head()
losses[['loss','val_loss']].plot()
losses[['loss','val_loss', 'accuracy','val_accuracy']].plot()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend(['acc', 'val_acc'])
plt.show()
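Since the sigmoid output is a probability, class labels need a decision threshold; a minimal sketch (0.5 threshold assumed, using sklearn metrics):
from sklearn.metrics import accuracy_score, classification_report
y_prob = model.predict(X_test)        # predicted probabilities in [0, 1]
y_pred = (y_prob > 0.5).astype(int)   # 0/1 labels at a 0.5 threshold
print(accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))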
C. DNN layers for multi-class classification
# Model creation
model = Sequential()
model.add(Dense(5, activation='relu', input_shape=(18,)))
model.add(Dropout(0.3))
model.add(Dense(4, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(2, activation='softmax'))  # output units = number of classes
model.summary()
# Compile - sparse_categorical_crossentropy expects integer class labels (0, 1, ...)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=16)
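If the labels were one-hot encoded instead of integers, the loss would be 'categorical_crossentropy'; a minimal sketch of the conversion, assuming 2 classes as above:
from tensorflow.keras.utils import to_categorical
y_train_oh = to_categorical(y_train, num_classes=2)   # integer labels -> one-hot vectors
y_test_oh = to_categorical(y_test, num_classes=2)
# with y_train_oh / y_test_oh, compile with loss='categorical_crossentropy' instead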
# Performance visualization
losses = pd.DataFrame(history.history)
losses.head()
losses[['loss','val_loss']].plot()
losses[['loss','val_loss', 'accuracy','val_accuracy']].plot()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend(['acc', 'val_acc'])
plt.show()
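To turn the softmax output into class labels, take the argmax over the class axis; a minimal sketch, assuming numpy and the same X_test as above:
import numpy as np
y_prob = model.predict(X_test)      # shape (n_samples, 2): one probability per class
y_pred = np.argmax(y_prob, axis=1)  # predicted class index per sample
print(y_pred[:10])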