Usage
config_optimizer(
type = c("adam", "adadelta", "adagrad", "rmsprop", "rprop", "sgd"),
verbose = FALSE,
...
)
Arguments
- type
character string defining which optimizer should be used. See Details.
- verbose
If TRUE, additional information about the optimizer will be printed to the console
- ...
additional arguments to be passed to the optimizer. See Details.
Value
object of class cito_optim that can be passed to dnn() via its optimizer argument
Details
Different optimizers require different arguments; with verbose = TRUE this function prints how they are set. For more information see the corresponding torch functions (a short sketch of passing such arguments follows the list below):
- adam: optim_adam
- adadelta: optim_adadelta
- adagrad: optim_adagrad
- rmsprop: optim_rmsprop
- rprop: optim_rprop
- sgd: optim_sgd
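For example, arguments supplied via ... are forwarded to the matching torch optimizer. The following is a minimal sketch, assuming the argument names of torch::optim_adam and torch::optim_sgd are accepted here; the learning rate itself is set in dnn(), not in config_optimizer():
# hedged sketch: optimizer-specific arguments are passed through '...'
# and are assumed to follow the torch functions listed above
opt_adam <- config_optimizer(type = "adam",
                             betas = c(0.9, 0.999),  # as in torch::optim_adam
                             weight_decay = 0.01)
opt_sgd <- config_optimizer(type = "sgd",
                            momentum = 0.9,          # as in torch::optim_sgd
                            nesterov = TRUE)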
Examples
# \donttest{
if(torch::torch_is_installed()){
library(cito)
# create optimizer object
opt <- config_optimizer(type = "adagrad",
                        lr_decay = 1e-04,
                        weight_decay = 0.1,
                        verbose = TRUE)
# Build and train Network
nn.fit <- dnn(Sepal.Length ~ ., data = datasets::iris, optimizer = opt)
}
#> set adagrad optimizer with following values
#> lr_decay: [1e-04]
#> weight_decay: [0.1]
#> initial_accumulator_value: [0]
#> eps: [1e-10]
#> Loss at epoch 1: 6.225907, lr: 0.01000
#> Loss at epoch 2: 0.178135, lr: 0.01000
#> Loss at epoch 3: 0.149779, lr: 0.01000
#> Loss at epoch 4: 0.148811, lr: 0.01000
#> Loss at epoch 5: 0.151712, lr: 0.01000
#> Loss at epoch 6: 0.135641, lr: 0.01000
#> Loss at epoch 7: 0.147265, lr: 0.01000
#> Loss at epoch 8: 0.136683, lr: 0.01000
#> Loss at epoch 9: 0.130334, lr: 0.01000
#> Loss at epoch 10: 0.150065, lr: 0.01000
#> Loss at epoch 11: 0.140675, lr: 0.01000
#> Loss at epoch 12: 0.131841, lr: 0.01000
#> Loss at epoch 13: 0.132131, lr: 0.01000
#> Loss at epoch 14: 0.130012, lr: 0.01000
#> Loss at epoch 15: 0.138619, lr: 0.01000
#> Loss at epoch 16: 0.133020, lr: 0.01000
#> Loss at epoch 17: 0.130165, lr: 0.01000
#> Loss at epoch 18: 0.129669, lr: 0.01000
#> Loss at epoch 19: 0.125787, lr: 0.01000
#> Loss at epoch 20: 0.133129, lr: 0.01000
#> Loss at epoch 21: 0.128147, lr: 0.01000
#> Loss at epoch 22: 0.128105, lr: 0.01000
#> Loss at epoch 23: 0.127118, lr: 0.01000
#> Loss at epoch 24: 0.124951, lr: 0.01000
#> Loss at epoch 25: 0.123972, lr: 0.01000
#> Loss at epoch 26: 0.125241, lr: 0.01000
#> Loss at epoch 27: 0.124767, lr: 0.01000
#> Loss at epoch 28: 0.123526, lr: 0.01000
#> Loss at epoch 29: 0.127805, lr: 0.01000
#> Loss at epoch 30: 0.123510, lr: 0.01000
#> Loss at epoch 31: 0.127298, lr: 0.01000
#> Loss at epoch 32: 0.124650, lr: 0.01000
#> Loss at epoch 33: 0.125763, lr: 0.01000
#> Loss at epoch 34: 0.126051, lr: 0.01000
#> Loss at epoch 35: 0.122242, lr: 0.01000
#> Loss at epoch 36: 0.129313, lr: 0.01000
#> Loss at epoch 37: 0.124598, lr: 0.01000
#> Loss at epoch 38: 0.122186, lr: 0.01000
#> Loss at epoch 39: 0.118365, lr: 0.01000
#> Loss at epoch 40: 0.121126, lr: 0.01000
#> Loss at epoch 41: 0.117125, lr: 0.01000
#> Loss at epoch 42: 0.123092, lr: 0.01000
#> Loss at epoch 43: 0.125103, lr: 0.01000
#> Loss at epoch 44: 0.120774, lr: 0.01000
#> Loss at epoch 45: 0.124623, lr: 0.01000
#> Loss at epoch 46: 0.121026, lr: 0.01000
#> Loss at epoch 47: 0.120437, lr: 0.01000
#> Loss at epoch 48: 0.117122, lr: 0.01000
#> Loss at epoch 49: 0.118680, lr: 0.01000
#> Loss at epoch 50: 0.120027, lr: 0.01000
#> Loss at epoch 51: 0.116412, lr: 0.01000
#> Loss at epoch 52: 0.115084, lr: 0.01000
#> Loss at epoch 53: 0.119846, lr: 0.01000
#> Loss at epoch 54: 0.115350, lr: 0.01000
#> Loss at epoch 55: 0.117904, lr: 0.01000
#> Loss at epoch 56: 0.115271, lr: 0.01000
#> Loss at epoch 57: 0.123510, lr: 0.01000
#> Loss at epoch 58: 0.117413, lr: 0.01000
#> Loss at epoch 59: 0.115741, lr: 0.01000
#> Loss at epoch 60: 0.118812, lr: 0.01000
#> Loss at epoch 61: 0.114136, lr: 0.01000
#> Loss at epoch 62: 0.117162, lr: 0.01000
#> Loss at epoch 63: 0.116445, lr: 0.01000
#> Loss at epoch 64: 0.115196, lr: 0.01000
#> Loss at epoch 65: 0.115683, lr: 0.01000
#> Loss at epoch 66: 0.113325, lr: 0.01000
#> Loss at epoch 67: 0.117382, lr: 0.01000
#> Loss at epoch 68: 0.113919, lr: 0.01000
#> Loss at epoch 69: 0.114175, lr: 0.01000
#> Loss at epoch 70: 0.117961, lr: 0.01000
#> Loss at epoch 71: 0.115343, lr: 0.01000
#> Loss at epoch 72: 0.114377, lr: 0.01000
#> Loss at epoch 73: 0.116054, lr: 0.01000
#> Loss at epoch 74: 0.116007, lr: 0.01000
#> Loss at epoch 75: 0.114202, lr: 0.01000
#> Loss at epoch 76: 0.112339, lr: 0.01000
#> Loss at epoch 77: 0.116300, lr: 0.01000
#> Loss at epoch 78: 0.113869, lr: 0.01000
#> Loss at epoch 79: 0.114203, lr: 0.01000
#> Loss at epoch 80: 0.116307, lr: 0.01000
#> Loss at epoch 81: 0.114783, lr: 0.01000
#> Loss at epoch 82: 0.110949, lr: 0.01000
#> Loss at epoch 83: 0.116322, lr: 0.01000
#> Loss at epoch 84: 0.113867, lr: 0.01000
#> Loss at epoch 85: 0.110947, lr: 0.01000
#> Loss at epoch 86: 0.116604, lr: 0.01000
#> Loss at epoch 87: 0.117781, lr: 0.01000
#> Loss at epoch 88: 0.114093, lr: 0.01000
#> Loss at epoch 89: 0.114175, lr: 0.01000
#> Loss at epoch 90: 0.112579, lr: 0.01000
#> Loss at epoch 91: 0.114545, lr: 0.01000
#> Loss at epoch 92: 0.122809, lr: 0.01000
#> Loss at epoch 93: 0.112349, lr: 0.01000
#> Loss at epoch 94: 0.112933, lr: 0.01000
#> Loss at epoch 95: 0.113195, lr: 0.01000
#> Loss at epoch 96: 0.111415, lr: 0.01000
#> Loss at epoch 97: 0.112785, lr: 0.01000
#> Loss at epoch 98: 0.115019, lr: 0.01000
#> Loss at epoch 99: 0.109299, lr: 0.01000
#> Loss at epoch 100: 0.113014, lr: 0.01000
# }
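A further hedged sketch, combining a non-default optimizer with an explicit learning rate; it assumes dnn() accepts the learning rate through an lr argument, as suggested by the lr values in the training log above:
if(torch::torch_is_installed()){
  library(cito)
  # momentum is assumed to be forwarded to torch::optim_sgd
  opt_sgd <- config_optimizer(type = "sgd", momentum = 0.9)
  # the learning rate is assumed to be set in dnn(), not in config_optimizer()
  fit_sgd <- dnn(Sepal.Length ~ ., data = datasets::iris,
                 optimizer = opt_sgd, lr = 0.05)
}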