Abstract
The content below converts the TensorFlow examples from hunkim's "Machine Learning / Deep Learning for Everyone" lectures to R.
hunkim's course is available at http://hunkim.github.io/ml/.
The example below trains a network while randomly deactivating neurons during training, i.e., dropout.
If TensorFlow raises an error during the training step, the R session must be restarted.
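Before the full example, a minimal sketch of the dropout operation itself may help (the tensor shape and values here are my own illustration, not part of the lecture code): with rate = 0.3, tf$nn$dropout zeroes roughly 30% of the elements and scales the survivors by 1 / (1 - rate), so each element keeps the same expected value.

library(tensorflow)

sess <- tf$Session()

# a 1 x 10 tensor of ones, just to make the effect visible
x <- tf$ones(shape = list(1L, 10L))
dropped <- tf$nn$dropout(x, rate = 0.3)

# surviving entries become 1 / (1 - 0.3), about 1.4286; the rest become 0
print(sess$run(dropped))

sess$close()

At test time the example below feeds drop_rate = 0, which makes dropout an identity operation.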
library(tensorflow)
library(ggplot2)
library(reshape2)
library(stringr)

# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = TRUE)

# parameters
learning_rate = 0.001
batch_size = 100L
num_epochs = 50
num_iterations = as.integer(mnist$train$num_examples / batch_size)

# input placeholders
X = tf$placeholder(tf$float32, shape = list(NULL, 784L))
Y = tf$placeholder(tf$float32, shape = list(NULL, 10L))

# weights & biases for the nn layers; drop_rate is fed at run time
# (0.3 while training, 0 while testing)
drop_rate = tf$placeholder(tf$float32)

W1 = tf$get_variable(name = "weight1", shape = list(784L, 512L),
                     initializer = tf$contrib$layers$xavier_initializer())
b1 = tf$Variable(initial_value = tf$random_normal(shape = list(512L)), name = "biase1")
W2 = tf$get_variable(name = "weight2", shape = list(512L, 512L),
                     initializer = tf$contrib$layers$xavier_initializer())
b2 = tf$Variable(initial_value = tf$random_normal(shape = list(512L)), name = "biase2")
W3 = tf$get_variable(name = "weight3", shape = list(512L, 512L),
                     initializer = tf$contrib$layers$xavier_initializer())
b3 = tf$Variable(initial_value = tf$random_normal(shape = list(512L)), name = "biase3")
W4 = tf$get_variable(name = "weight4", shape = list(512L, 512L),
                     initializer = tf$contrib$layers$xavier_initializer())
b4 = tf$Variable(initial_value = tf$random_normal(shape = list(512L)), name = "biase4")
W5 = tf$get_variable(name = "weight5", shape = list(512L, 10L),
                     initializer = tf$contrib$layers$xavier_initializer())
b5 = tf$Variable(initial_value = tf$random_normal(shape = list(10L)), name = "biase5")

# four hidden layers, each followed by dropout
# (note: the bias belongs inside the relu, not added after it)
L1 = tf$nn$relu(tf$matmul(X, W1) + b1)
L1 = tf$nn$dropout(L1, rate = drop_rate)
L2 = tf$nn$relu(tf$matmul(L1, W2) + b2)
L2 = tf$nn$dropout(L2, rate = drop_rate)
L3 = tf$nn$relu(tf$matmul(L2, W3) + b3)
L3 = tf$nn$dropout(L3, rate = drop_rate)
L4 = tf$nn$relu(tf$matmul(L3, W4) + b4)
L4 = tf$nn$dropout(L4, rate = drop_rate)
hypothesis = tf$matmul(L4, W5) + b5

# softmax_cross_entropy_with_logits_v2
cost = tf$reduce_mean(
  tf$nn$softmax_cross_entropy_with_logits_v2(logits = hypothesis,
                                             labels = tf$stop_gradient(Y))
)
train = tf$train$AdamOptimizer(learning_rate = learning_rate)$minimize(cost)

correct_prediction = tf$equal(x = tf$argmax(input = hypothesis, axis = 1L),
                              y = tf$argmax(input = Y, axis = 1L))
accuracy = tf$reduce_mean(tf$cast(x = correct_prediction, dtype = tf$float32))

# train my model
sess <- tf$Session()

# initialize
sess$run(fetches = tf$global_variables_initializer())

for (epoch in 1:num_epochs) {
  avg_cost = 0
  for (iteration in 1:num_iterations) {
    batches = mnist$train$next_batch(batch_size)
    batch_xs <- batches[[1]]
    batch_ys <- batches[[2]]
    # drop 30% of each hidden layer's activations during training
    stage = sess$run(fetches = list(train, cost),
                     feed_dict = dict(X = batch_xs, Y = batch_ys, drop_rate = 0.3))
    avg_cost = avg_cost + stage[[2]] / num_iterations
  }
  cat("Epoch:", str_pad(epoch, 4, pad = "0"), "Cost:", avg_cost, "\n")
}
cat("\nLearning Finished!\n")

# Test model and check accuracy; dropout is disabled by feeding drop_rate = 0
accur <- sess$run(fetches = list(accuracy),
                  feed_dict = dict(X = mnist$test$images, Y = mnist$test$labels,
                                   drop_rate = 0))
cat("Accuracy:", accur[[1]], "\n")

# Learning Finished!
# Accuracy: 0.9204

# predicted digit vs. observed digit for each test image, as a confusion matrix
pred <- sess$run(tf$argmax(hypothesis, axis = 1L),
                 feed_dict = dict(X = mnist$test$images, drop_rate = 0))
lbls <- apply(mnist$test$labels, 1, function(row) { which.max(row) - 1 })
table(PRED = pred, OBSRV = lbls)

sess$close()

# See https://tensorflow.rstudio.com/tfestimators/articles/examples/mnist.html
# for how to view the images as a plot

# Inspect the misclassified images whose true label is 0
tmp.df <- as.data.frame(cbind(pred = pred, lbls = lbls))
indices <- which(tmp.df$pred != 0 & tmp.df$lbls == 0)
if (length(indices) > 36) indices <- sample(indices, 36)
indices <- indices[order(indices)]
n <- length(indices)

data <- array(mnist$test$images[indices, ], dim = c(n, 28, 28))
melted <- melt(data, varnames = c("image", "x", "y"), value.name = "intensity")

ggplot(melted, aes(x = x, y = y, fill = intensity)) +
  geom_tile() +
  scale_fill_continuous(name = "Pixel Intensity") +
  scale_y_reverse() +
  # ceiling() keeps the facet grid valid when n is not a perfect square
  facet_wrap(~ image, nrow = ceiling(sqrt(n)), ncol = ceiling(sqrt(n))) +
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank(),
    panel.spacing = unit(0, "lines"),
    axis.text = element_blank(),
    axis.ticks = element_blank()
  ) +
  labs(
    title = "MNIST Image Data",
    subtitle = "Visualization of a sample of images contained in MNIST data set.",
    x = NULL,
    y = NULL
  )

print("END OF CODE")
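As a quick follow-up, per-class accuracy can be read off the confusion matrix built above (a sketch, assuming every digit 0-9 occurs in both pred and lbls so that the table is square):

cm <- table(PRED = pred, OBSRV = lbls)
# diagonal = correctly classified counts; column sums = true counts per digit
round(diag(cm) / colSums(cm), 3)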