Procedure: DDCW({X, Y}, p, k, l, α, β, θ)

Input: data and labels {x, y}, chunk size p, min_experts k, max_experts l, fading factor α, multiplier β, threshold θ
Output: global predictions G

1:  Experts ← create_random_experts(k);
2:  initialize class weights w_{i,j};
3:  for s = 0, …, n do
4:      for i = 1, …, num_experts(Experts) do
5:          Local_predictions ← classify(Experts_i, x_s);
6:          if Local_predictions = y_s then
7:              w_{i,L} ← β * w_{i,L};   ▷ multiply the weight of the particular expert and the target class of the local prediction by β
8:          end if
9:      end for
10:     if all samples in the chunk are processed then
11:         Local_predictions ← classify(Experts, x_s);
12:         Diversity ← calculate_diversity(Local_predictions, y_s);
13:         for i = 1, …, num_experts(Experts) do
14:             expert_lifetime ← increase expert lifetime in each period;
15:             w_i ← w_i − (exp(α * expert_lifetime) − 1)/10;
16:             w_i ← w_i * (1 − Diversity_i);
17:         end for
18:     end if
19:     for j = 0, …, Class_labels do
20:         Global_predictions_j ← sum(w_j);
21:     end for
22:     Global_predictions ← argmax(Global_predictions_j);
23:     if all samples in the chunk are processed then
24:         w ← normalize_weights(w);
25:         if Global_predictions_s ≠ y_s then
26:             if num_experts(Experts) == l then
27:                 {Experts, w, expert_lifetime} ← remove the weakest expert e_i based on the expert scores;
28:             end if
29:             if num_experts(Experts) < l then
30:                 Experts_new ← create_random_expert();
31:                 w_new ← 1/num_experts(Experts);
32:             end if
33:         end if
34:         {Experts, w, expert_lifetime} ← remove experts whose score is below the threshold θ;
35:         if num_experts(Experts) < k then
36:             Experts_new ← create_random_expert();
37:             w_new ← 1/num_experts(Experts);
38:         end if
39:     end if
40:     for i = 1, …, num_experts(Experts) do
41:         Sample_weights_s ← random_uniform_weight();
42:         Experts_i ← learn_expert(Experts_i, x_s, y_s, Sample_weights_s);
43:     end for
44:     return Global_predictions;
45: end for
|