# Load the keras package, which provides the CIFAR-10 dataset loader.
library(keras)

# Open the help page describing the CIFAR-10 dataset.
?dataset_cifar10

# Fetch CIFAR-10: a list with $train and $test, each holding $x (images)
# and $y (integer class labels 0-9).
cifar <- dataset_cifar10()

# Inspect the training images: 50,000 samples of 32x32 pixels x 3 channels.
str(cifar$train$x)
##  int [1:50000, 1:32, 1:32, 1:3] 59 154 255 28 170 159 164 28 134 125 ...

# Peek at the first ten training labels.
cifar$train$y[1:10]
##  [1] 6 9 9 4 1 1 2 7 8 3

# Display the first training image (empty indices keep the full 32x32x3 slice).
plot(EBImage::Image(cifar$train$x[1, , , ]), all = TRUE)

# NOTE(review): the commented lines below look like leftover scratch from an
# earlier MNIST-style experiment (28x28x1 inputs, 8 classes). They reference
# variables (Train_x, Test_x, Train_y, Test_y) that are never defined in this
# script; kept only for history and safe to remove.
#str(train_x)
#str(Train_x)
#str(Test_x)
#Train_x[1:3,1:4]
#str(train_y)
#str(Train_y)
#dim(Train_x) <- c( nrow(Train_x),28, 28, 1)
#dim(Test_x) <- c(nrow(Test_x), 28,28,1)
#Train_y <- keras::to_categorical(Train_y, 8)[,-1]  ## 8-1 length of classes
#Test_y <- keras::to_categorical(Test_y, 8)[,-1]
# TRAINING DATA ----
# Scale pixel intensities from [0, 255] down to [0, 1].
train_x <- cifar$train$x / 255
# One-hot encode the integer class labels (10 classes) into a binary class
# matrix using keras's built-in to_categorical().
train_y <- to_categorical(cifar$train$y, num_classes = 10)

# TEST DATA ----
test_x <- cifar$test$x / 255
test_y <- to_categorical(cifar$test$y, num_classes = 10)

# Sanity-check the dimensions of the prepared arrays.
dim(train_x)
## [1] 50000    32    32     3
cat("No of training samples\t", dim(train_x)[[1]],
    "\tNo of test samples\t", dim(test_x)[[1]])
## No of training samples    50000  No of test samples   10000
# A linear stack of layers.
model <- keras_model_sequential()

# Configure the model architecture.
# Fix: the layer argument is `filters` (plural); the original `filter=` only
# worked through R's partial argument matching, which is fragile.
model %>%
  # 2-D convolution layer; "same" padding keeps the 32x32 spatial size.
  layer_conv_2d(filters = 28, kernel_size = c(5, 5), padding = "same",
                input_shape = c(32, 32, 3)) %>%
  layer_activation("tanh") %>%

  # Second 2-D convolution layer (default "valid" padding shrinks the map).
  layer_conv_2d(filters = 28, kernel_size = c(5, 5)) %>%
  layer_activation("tanh") %>%

  # Max-pooling layer: halves the spatial dimensions of the feature maps,
  # reducing the computational complexity of the model.
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  # Dropout layer to reduce overfitting.
  layer_dropout(0.25) %>%

  layer_conv_2d(filters = 28, kernel_size = c(5, 5), padding = "same") %>%
  layer_activation("tanh") %>%
  layer_conv_2d(filters = 28, kernel_size = c(5, 5)) %>%
  layer_activation("tanh") %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  layer_dropout(0.25) %>%

  # Flatten the feature maps into a vector for the dense layers.
  layer_flatten() %>%
  layer_dense(256) %>%
  layer_activation("tanh") %>%
  layer_dropout(0.5) %>%

  # Output layer: 10 units, one per class.
  layer_dense(10) %>%
  # Softmax converts the raw logits of the output layer into class
  # probabilities (suitable for the categorical cross-entropy loss).
  layer_activation("softmax")
# Model's optimizer: ADAM (Adaptive Moment Estimation).
# Fix: `lr` is the deprecated argument name; current keras R API uses
# `learning_rate`. `decay` applies learning-rate decay over each update
# (NOTE(review): removed in keras 3 optimizers — confirm installed version).
opt <- optimizer_adam(learning_rate = 0.0001, decay = 1e-6)

# Compile with categorical cross-entropy (matches the one-hot labels and
# softmax output) and track accuracy during training.
model %>%
  compile(loss = "categorical_crossentropy",
          optimizer = opt, metrics = "accuracy")
#Summary of the Model and its Architecture
# summary(model)
#TRAINING PROCESS OF THE MODEL
# NOTE(review): this flag is currently unused because the entire training
# branch below is commented out; when enabled it selects plain fit() vs.
# training on augmented images.
data_augmentation <- TRUE
# if(!data_augmentation) {
# model %>% fit( train_x,train_y ,batch_size=200,
#                epochs=10,validation_data = list(test_x, test_y),
#                shuffle=TRUE)
# }else {
# #Generating images
#
# gen_images <- image_data_generator(featurewise_center = TRUE,
#       featurewise_std_normalization = TRUE,
#       rotation_range = 20,
#       width_shift_range = 0.30,
#       height_shift_range = 0.30,
#       horizontal_flip = FALSE  )
# #Fit image data generator internal statistics to some sample data
# gen_images %>% fit_image_data_generator(train_x)
# #Generates batches of augmented/normalized data from image data and
# #labels to visually see the generated images by the Model
# NOTE(review): fit_generator() is deprecated in recent keras releases in
# favour of fit() with a generator — confirm against the installed version
# before re-enabling this branch.
# model %>% fit_generator(
#      flow_images_from_data(train_x, train_y,gen_images,
#      batch_size=32,save_to_dir="../keras_mnist/"),
#      steps_per_epoch=as.integer(910),
#      epochs = 10,
#      validation_data = list(test_x, test_y))
# }
# NOTE(review): save_to_dir points at "../keras_mnist/" although this script
# trains on CIFAR-10 — presumably copied from an MNIST example; verify the
# directory exists before enabling, or fit_generator will error.
#use save_to_dir argument to specify the directory to save the #images generated by the Model and to visually check the Model's #output and ability to classify images.