
Print class citodnn

Usage

# S3 method for citodnn
print(x, ...)

# S3 method for citodnnBootstrap
print(x, ...)

Arguments

x

a model created by dnn()

...

additional arguments

Value

The original object x is returned.
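
Because print() passes its input back, the call can be embedded in a pipeline without losing the model. A minimal sketch, assuming a fitted model nn.fit as in the example below; the invisible return follows the usual convention for R print methods:

# Sketch only: nn.fit is a fitted citodnn model (see Examples).
# Assumes the method follows the usual R convention of returning
# its input invisibly.
fit <- print(nn.fit)    # prints the network summary
identical(fit, nn.fit)  # the original object is passed through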

Examples

# \donttest{
if(torch::torch_is_installed()){
library(cito)

set.seed(222)
validation_set <- sample(1:nrow(datasets::iris), 25)

# Build and train the network
nn.fit <- dnn(Sepal.Length ~ ., data = datasets::iris[-validation_set, ])

# Structure of the neural network
print(nn.fit)
}
#> Loss at epoch 1: 3.488939, lr: 0.01000
#> Loss at epoch 2: 0.245107, lr: 0.01000
#> Loss at epoch 3: 0.211060, lr: 0.01000
#> Loss at epoch 4: 0.236908, lr: 0.01000
#> Loss at epoch 5: 0.258936, lr: 0.01000
#> Loss at epoch 6: 0.290947, lr: 0.01000
#> Loss at epoch 7: 0.193172, lr: 0.01000
#> Loss at epoch 8: 0.162572, lr: 0.01000
#> Loss at epoch 9: 0.334945, lr: 0.01000
#> Loss at epoch 10: 0.163172, lr: 0.01000
#> Loss at epoch 11: 0.198721, lr: 0.01000
#> Loss at epoch 12: 0.222329, lr: 0.01000
#> Loss at epoch 13: 0.228592, lr: 0.01000
#> Loss at epoch 14: 0.193058, lr: 0.01000
#> Loss at epoch 15: 0.161906, lr: 0.01000
#> Loss at epoch 16: 0.164644, lr: 0.01000
#> Loss at epoch 17: 0.207957, lr: 0.01000
#> Loss at epoch 18: 0.217160, lr: 0.01000
#> Loss at epoch 19: 0.134338, lr: 0.01000
#> Loss at epoch 20: 0.163871, lr: 0.01000
#> Loss at epoch 21: 0.146897, lr: 0.01000
#> Loss at epoch 22: 0.139477, lr: 0.01000
#> Loss at epoch 23: 0.156195, lr: 0.01000
#> Loss at epoch 24: 0.162893, lr: 0.01000
#> Loss at epoch 25: 0.133141, lr: 0.01000
#> Loss at epoch 26: 0.124446, lr: 0.01000
#> Loss at epoch 27: 0.174543, lr: 0.01000
#> Loss at epoch 28: 0.151114, lr: 0.01000
#> Loss at epoch 29: 0.377626, lr: 0.01000
#> Loss at epoch 30: 0.118539, lr: 0.01000
#> Loss at epoch 31: 0.238695, lr: 0.01000
#> Loss at epoch 32: 0.188252, lr: 0.01000
#> Loss at epoch 33: 0.203104, lr: 0.01000
#> Loss at epoch 34: 0.178319, lr: 0.01000
#> Loss at epoch 35: 0.252107, lr: 0.01000
#> Loss at epoch 36: 0.135749, lr: 0.01000
#> Loss at epoch 37: 0.127166, lr: 0.01000
#> Loss at epoch 38: 0.152614, lr: 0.01000
#> Loss at epoch 39: 0.142652, lr: 0.01000
#> Loss at epoch 40: 0.113011, lr: 0.01000
#> Loss at epoch 41: 0.179346, lr: 0.01000
#> Loss at epoch 42: 0.218465, lr: 0.01000
#> Loss at epoch 43: 0.114035, lr: 0.01000
#> Loss at epoch 44: 0.125792, lr: 0.01000
#> Loss at epoch 45: 0.205525, lr: 0.01000
#> Loss at epoch 46: 0.200066, lr: 0.01000
#> Loss at epoch 47: 0.179345, lr: 0.01000
#> Loss at epoch 48: 0.181047, lr: 0.01000
#> Loss at epoch 49: 0.137267, lr: 0.01000
#> Loss at epoch 50: 0.199441, lr: 0.01000
#> Loss at epoch 51: 0.139161, lr: 0.01000
#> Loss at epoch 52: 0.182015, lr: 0.01000
#> Loss at epoch 53: 0.105407, lr: 0.01000
#> Loss at epoch 54: 0.105132, lr: 0.01000
#> Loss at epoch 55: 0.247931, lr: 0.01000
#> Loss at epoch 56: 0.133266, lr: 0.01000
#> Loss at epoch 57: 0.128581, lr: 0.01000
#> Loss at epoch 58: 0.131278, lr: 0.01000
#> Loss at epoch 59: 0.105430, lr: 0.01000
#> Loss at epoch 60: 0.340761, lr: 0.01000
#> Loss at epoch 61: 0.130478, lr: 0.01000
#> Loss at epoch 62: 0.216714, lr: 0.01000
#> Loss at epoch 63: 0.132688, lr: 0.01000
#> Loss at epoch 64: 0.117173, lr: 0.01000
#> Loss at epoch 65: 0.115693, lr: 0.01000
#> Loss at epoch 66: 0.160055, lr: 0.01000
#> Loss at epoch 67: 0.127803, lr: 0.01000
#> Loss at epoch 68: 0.133004, lr: 0.01000
#> Loss at epoch 69: 0.118690, lr: 0.01000
#> Loss at epoch 70: 0.170125, lr: 0.01000
#> Loss at epoch 71: 0.150666, lr: 0.01000
#> Loss at epoch 72: 0.204016, lr: 0.01000
#> Loss at epoch 73: 0.152163, lr: 0.01000
#> Loss at epoch 74: 0.131467, lr: 0.01000
#> Loss at epoch 75: 0.119410, lr: 0.01000
#> Loss at epoch 76: 0.107871, lr: 0.01000
#> Loss at epoch 77: 0.132379, lr: 0.01000
#> Loss at epoch 78: 0.142259, lr: 0.01000
#> Loss at epoch 79: 0.134533, lr: 0.01000
#> Loss at epoch 80: 0.105997, lr: 0.01000
#> Loss at epoch 81: 0.179817, lr: 0.01000
#> Loss at epoch 82: 0.100998, lr: 0.01000
#> Loss at epoch 83: 0.153306, lr: 0.01000
#> Loss at epoch 84: 0.148764, lr: 0.01000
#> Loss at epoch 85: 0.133173, lr: 0.01000
#> Loss at epoch 86: 0.148112, lr: 0.01000
#> Loss at epoch 87: 0.176372, lr: 0.01000
#> Loss at epoch 88: 0.158926, lr: 0.01000
#> Loss at epoch 89: 0.159746, lr: 0.01000
#> Loss at epoch 90: 0.112563, lr: 0.01000
#> Loss at epoch 91: 0.131066, lr: 0.01000
#> Loss at epoch 92: 0.144636, lr: 0.01000
#> Loss at epoch 93: 0.307309, lr: 0.01000
#> Loss at epoch 94: 0.195153, lr: 0.01000
#> Loss at epoch 95: 0.123371, lr: 0.01000
#> Loss at epoch 96: 0.106649, lr: 0.01000
#> Loss at epoch 97: 0.209070, lr: 0.01000
#> Loss at epoch 98: 0.125384, lr: 0.01000
#> Loss at epoch 99: 0.110050, lr: 0.01000
#> Loss at epoch 100: 0.144732, lr: 0.01000
#> dnn(formula = Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + 
#>     Species - 1, data = datasets::iris[-validation_set, ])
#> An `nn_module` containing 2,951 parameters.
#> 
#> ── Modules ─────────────────────────────────────────────────────────────────────
#> • 0: <nn_linear> #350 parameters
#> • 1: <nn_selu> #0 parameters
#> • 2: <nn_linear> #2,550 parameters
#> • 3: <nn_selu> #0 parameters
#> • 4: <nn_linear> #51 parameters
# }
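
The Usage section also lists a print method for citodnnBootstrap objects, which dnn() returns when bootstrapping is enabled. A minimal sketch, assuming dnn() accepts a bootstrap argument giving the number of bootstrap refits (see ?dnn; treat the exact argument as an assumption here):

# Sketch: printing a bootstrapped model. Assumes dnn() accepts a
# `bootstrap` argument (number of bootstrap refits, see ?dnn).
if (torch::torch_is_installed()) {
  nn.boot <- dnn(Sepal.Length ~ .,
                 data = datasets::iris[-validation_set, ],
                 bootstrap = 3)
  print(nn.boot)  # summarises the fits across bootstrap samples
}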