TABLE VI.
GCNN | regularization | 1.09e-2 |
num_epochs | 350 | |
Fs | [[9]] | |
M | [137, 49] | |
Ks | [[7]] | |
batch_size | 92 | |
pool | apool1 | |
learning_rate | 1.23e-3 | |
decay_steps | 405 | |
decay_rate | 9.91e-1 | |
dropout | 6.98e-1 | |
momentum | 8.79e-1 | |
ps | [[2]] | |
FF-ANN | activation | relu |
alpha | 1.69 | |
power_t | 3.30e-1 | |
learning_rate_init | 1.09e-1 | |
hidden_layer_sizes | [955] | |
learning_rate | adaptive | |
momentum | 8.64e-1 | |
early_stopping | True | |
nesterovs_momentum | True | |
KNNs | weights | distance |
metric | canberra | |
n_neighbors | 12 | |
Linear Classifier | penalty | l1 |
l1_ratio | 4.06e-1 | |
alpha | 1.23e-3 | |
loss | log | |
n_jobs | -1 | |
tol | 1.00e-5 | |
learning_rate | invscaling | |
eta0 | 3.17e-4 | |
power_t | 1.84e-1 | |
Random Forest | max_depth | 100 |
max_leaf_nodes | None | |
criterion | gini | |
n_estimators | 211 | |
min_samples_split | 2 | |
min_weight_fraction_leaf | 1.27e-6 | |
min_impurity_decrease | 1.70e-5 | |
min_samples_leaf | 1 | |
Decision Tree | max_features | None |
criterion | entropy | |
max_depth | 10 | |
splitter | best | |
min_samples_leaf | 2 | |
min_impurity_decrease | 1.23e-3 | |
min_samples_split | 2 | |
max_leaf_nodes | None | |
min_weight_fraction_leaf | 2.08e-3 |