ML algorithms and their hyperparameter search grids
Logistic Regression params = {
 norm of the penalty: ["l1", "l2", "elasticnet"]
}
Random Forest params = {
 number of trees in the forest: [10, 100, 500, 1000],
 maximum number of features considered at each split: [2, 4, 6, 8],
 function to measure the quality of a split: ["gini", "entropy"]
}
Extra Trees params = {
 number of trees in the forest: [50, 500, 1000, 5000],
 number of features to consider when looking for the best split: ["sqrt", "log2", None],
 function to measure the quality of a split: ["gini", "entropy"]
}
XGBoost params = {
 minimum sum of instance weight (hessian) needed in a child: [1, 5, 10],
 minimum loss reduction required to make a further partition on a leaf node of the tree (gamma): [0.5, 1, 1.5, 2, 5],
 subsample ratio of the training instances: [1.0, 0.8, 0.6],
 subsample ratio of columns when constructing each tree: [0.6, 0.8, 1.0],
 maximum depth: [3, 4, 5]
}
AdaBoost params = {
 maximum number of estimators at which boosting is terminated: [500, 1000, 2000, 5000],
 weight applied to each classifier at each boosting iteration (learning_rate): [0.001, 0.01, 0.1]
}
Support Vector Machine params = {
 kernel: ["poly", "linear", "rbf", "sigmoid"],
 degree of the polynomial kernel function: [2, 3, 4, 5, 10],
 kernel coefficient for "rbf", "poly" and "sigmoid": ["scale", "auto"]
}
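
For illustration, the sketch below shows how one of these grids (Random Forest) could be plugged into a standard scikit-learn grid search. It is a minimal sketch under stated assumptions, not the authors' code: the random_state, the 5-fold cross-validation, and the X_train/y_train names are placeholders that the table does not specify.

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

# Random Forest grid from the table, mapped to scikit-learn parameter names
rf_param_grid = {
    "n_estimators": [10, 100, 500, 1000],   # number of trees in the forest
    "max_features": [2, 4, 6, 8],           # maximum features considered at each split
    "criterion": ["gini", "entropy"],        # function measuring split quality
}

search = GridSearchCV(
    estimator=RandomForestClassifier(random_state=0),  # random_state is an assumption
    param_grid=rf_param_grid,
    cv=5,        # assumed fold count; not stated in the table
    n_jobs=-1,
)
# search.fit(X_train, y_train)      # X_train, y_train are placeholder names
# best_rf = search.best_estimator_

The other grids follow the same pattern, with the descriptive names above corresponding to the estimators' keyword arguments (e.g. min_child_weight, gamma, subsample and colsample_bytree for XGBoost, or kernel, degree and gamma for the Support Vector Machine).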