procedure Generate model-space tensor(ℱ, 𝒢):   # evaluate the spatial integrals of Eq. 2 for all feature map (ℱ) and pooling field (𝒢) candidates and resolutions
    for (Fl, gl) ∈ (ℱ, 𝒢) do
        Ml ← TensorDot(Fl[n, Kl, xl, yl], gl[G, xl, yl], axis = [[2, 3], [1, 2]])
    M ← Concatenate((Ml[n, Kl, G], ∀ l ∈ L), axis = 1)
    M ← Z-Score(M, axis = 0)
    return M[n, K, G]
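For illustration, a minimal NumPy sketch of the model-space tensor construction follows. The function and argument names are assumptions: feature_maps is a list of per-resolution arrays of shape [n, Kl, xl, yl], and pooling_fields holds the G candidate pooling fields rendered at each resolution, shape [G, xl, yl].

import numpy as np

def make_model_space_tensor(feature_maps, pooling_fields):
    """Spatial integrals of every feature map against every candidate pooling field."""
    blocks = []
    for F_l, g_l in zip(feature_maps, pooling_fields):
        # contract the two spatial axes: [n, K_l, x, y] x [G, x, y] -> [n, K_l, G]
        blocks.append(np.tensordot(F_l, g_l, axes=[[2, 3], [1, 2]]))
    M = np.concatenate(blocks, axis=1)          # stack feature maps: [n, K, G]
    # z-score each (feature, candidate) column across samples
    return (M - M.mean(axis=0)) / (M.std(axis=0) + 1e-8)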
procedure Generate voxel predictions(M, w):
    R̂ ← Batched-TensorDot(M[G, n, K], w[G, V, K], axis = [[2], [2]])
    return R̂[n, V, G]
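A matching sketch of the prediction step, again with assumed names: M_gnk is the model-space tensor permuted to [G, n, K], and w_gvk holds one weight vector per candidate and voxel.

import numpy as np

def predict_voxels(M_gnk, w_gvk):
    """Batched dot product over the feature axis: [G, n, K] x [G, V, K] -> [n, V, G]."""
    return np.einsum('gnk,gvk->nvg', M_gnk, w_gvk)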
procedure Loss(M, R, w):
    R̂ ← Generate voxel predictions(M, w)
    return L2-norm(R̂[n, V, G] − R[n, V, 1 → G])[V, G]
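And a sketch of the loss, which broadcasts the measured responses over the G candidates and sums the squared residuals over the sample batch (the names, and the use of a plain squared error without any regularization term, are illustrative assumptions):

import numpy as np

def fwrf_loss(M_gnk, R_nv, w_gvk):
    """Squared-error loss per (voxel, candidate): returns an array [V, G]."""
    R_hat = np.einsum('gnk,gvk->nvg', M_gnk, w_gvk)   # predictions [n, V, G]
    resid = R_hat - R_nv[:, :, None]                  # broadcast targets over candidates
    return np.sum(resid ** 2, axis=0)                 # [V, G]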
procedure Optimize fwRF model parameters(M, R, winit):   # M is the precalculated model-space tensor and R is the target voxel activity
    mbest ← zeros[V]      # best-model initialization
    wbest ← zeros[V, K]   # best-weight initialization
    sbest ← inf[V]        # best-score initialization
    for Vb ∈ Batch(V) do                    # take a batch of voxels
        for Gb ∈ Batch(G) do                # take a batch of candidate feature pooling fields
            wb ← winit[1 → Gb, Vb, K]
            for e ∈ 1..epochs do
                sb ← zeros[Vb, Gb]                  # scores for this batch
                for nb ∈ Batch(ntrain) do           # take a batch from the training samples
                    wb ← wb − λ ∇wb Loss(M[nb, K, Gb], R[nb, Vb], wb[Gb, Vb, K])   # gradient step on the training loss
                for nb ∈ Batch(nholdout) do         # take a batch from the holdout samples
                    sb ← sb + Loss(M[nb, K, Gb], R[nb, Vb], wb[Gb, Vb, K])
                for v ∈ Element(Vb) do              # for all voxels in the batch
                    g ← argmin(sb[v, Gb])           # get the best candidate for this batch
                    if sb[v, g] < sbest[v] then     # keep the best score seen across epochs and candidate batches
                        mbest[v] ← g
                        wbest[v, K] ← wb[g, v, K]
                        sbest[v] ← sb[v, g]
    return mbest, wbest, sbest
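Putting the pieces together, the following NumPy sketch mirrors the optimization loop above. The batch sizes, learning rate, zero weight initialization, and plain gradient-descent update are illustrative assumptions rather than the exact settings of the procedure; candidates and voxels are batched so that the weight tensor fits in memory, and the holdout score is used to pick the best candidate pooling field per voxel.

import numpy as np

def optimize_fwrf(M, R, train_idx, hold_idx, epochs=20, lr=1e-4,
                  voxel_batch=200, cand_batch=100, sample_batch=250):
    """M: model-space tensor [n, K, G]; R: voxel responses [n, V].
    Returns the best candidate index, weights, and holdout score per voxel."""
    n, K, G = M.shape
    V = R.shape[1]
    m_best = np.zeros(V, dtype=int)
    w_best = np.zeros((V, K))
    s_best = np.full(V, np.inf)
    for v0 in range(0, V, voxel_batch):                       # batch of voxels
        vs = slice(v0, min(v0 + voxel_batch, V))
        Vb = R[:, vs].shape[1]
        for g0 in range(0, G, cand_batch):                     # batch of candidate pooling fields
            gs = slice(g0, min(g0 + cand_batch, G))
            Mg = np.moveaxis(M[:, :, gs], -1, 0)               # [Gb, n, K]
            wb = np.zeros((Mg.shape[0], Vb, K))                # weight initialization (assumed zero)
            for _ in range(epochs):
                for b0 in range(0, len(train_idx), sample_batch):
                    idx = train_idx[b0:b0 + sample_batch]      # training sample batch
                    err = np.einsum('gnk,gvk->gnv', Mg[:, idx], wb) - R[idx, vs][None]
                    wb -= lr * 2 * np.einsum('gnv,gnk->gvk', err, Mg[:, idx])   # gradient step
                # holdout score used for candidate selection and early stopping
                pred = np.einsum('gnk,gvk->gnv', Mg[:, hold_idx], wb)
                sb = np.sum((pred - R[hold_idx, vs][None]) ** 2, axis=1)        # [Gb, Vb]
                for v in range(Vb):                            # keep the best score per voxel
                    g = int(np.argmin(sb[:, v]))
                    if sb[g, v] < s_best[v0 + v]:
                        m_best[v0 + v] = g0 + g
                        w_best[v0 + v] = wb[g, v]
                        s_best[v0 + v] = sb[g, v]
    return m_best, w_best, s_best

Because the best holdout score is compared and stored at every epoch, the returned weights correspond to the epoch at which the holdout error was lowest for each voxel, which acts as implicit early stopping in this sketch.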