diff --git a/gsrp_smart_util.py b/gsrp_smart_util.py
index 72232cde0506df4a13962b5578cbd5ee53f3c99a..eb0a8613cf4aea4fe03424f65c3e6e1a9c22e08e 100644
--- a/gsrp_smart_util.py
+++ b/gsrp_smart_util.py
@@ -101,11 +101,11 @@ def mul(mem1, mem2, cc, t_max, id1, id2, n_ind, mem_limit=np.infty):
     mask = reduce_all(np.abs(out_tij[:-1] - out_tij[-1:]) <= t_max) # Faster than numpy for large array
     out_tij = np.compress(mask, out_tij, axis=1)
     idx1, idx2 = idx1[mask], idx2[mask]
-    out_val = mem1[0][idx1] * mem2[0][idx2]
+    out_val = mem1[0][idx1] + mem2[0][idx2]
     tij_dep = out_tij[:-1] - out_tij[-1:]
     tij_dep *= np.array([1 if i > id2 else -1 for i in id1])[:, np.newaxis]
     ch_dep = np.array([num_ind(i, id2, n_ind) if i > id2 else num_ind(id2, i, n_ind) for i in id1])
-    out_val *= cc[ch_dep[:, np.newaxis], tij_dep].prod(0)
+    out_val += cc[ch_dep[:, np.newaxis], tij_dep].sum(0)
     return out_val, out_tij
 
 
@@ -132,9 +132,9 @@ def constrained_argmax(mem, cc, tij_ind, curr_tij, used_tij, t_max, n_ind):
     for u in used_tij:
         for i, c in enumerate(curr_tij):
             if u < c:
-                mem_val *= cc[num_ind(c, u, n_ind), mem_tij[i] - tij_ind[u]]
+                mem_val += cc[num_ind(c, u, n_ind), mem_tij[i] - tij_ind[u]]
             else:
-                mem_val *= cc[num_ind(u, c, n_ind), tij_ind[u] - mem_tij[i]]
+                mem_val += cc[num_ind(u, c, n_ind), tij_ind[u] - mem_tij[i]]
     return mem_tij[:, np.argmax(mem_val)]
 
 
@@ -148,7 +148,9 @@ def _get_mem_usage(memory):
 
 def smart_gsrp(cc, n_ind, n_tot, t_max, tree, program, clean_list, verbose=False, mem_limit=np.infty):
     memory = dict()
-    val = cc[:, 0].prod()
+    with np.errstate(divide='ignore'):
+        cc = np.log10(cc)
+    val = cc[:, 0].sum()
     tij = np.zeros(n_tot, int)
     for i, step in enumerate(program):
         # increase dimensions
@@ -164,7 +166,7 @@ def smart_gsrp(cc, n_ind, n_tot, t_max, tree, program, clean_list, verbose=False
                                      cc, t_max, tree[i - 1][op.left], tree[0][op.right][0], n_ind,
                                      mem_limit=mem_limit - _get_mem_usage(memory))
                if memory[(i, j)] is None:  # means that the memory limit has been reached
-                    return np.log10(val), tij, 0
+                    return val, tij, 0
         # find potential maximum
         tij[:] = 0
         done_tij = set()
@@ -174,7 +176,7 @@ def smart_gsrp(cc, n_ind, n_tot, t_max, tree, program, clean_list, verbose=False
                 tij[curr_tij] = constrained_argmax(memory[(i, j)], cc, tij[:n_ind], curr_tij, done_tij, t_max, n_ind)
                 done_tij.update(curr_tij)
             dep_tdoa(tij, n_ind, n_tot)
-            val = max(cc[np.arange(n_tot), tij].prod(), val)
+            val = max(cc[np.arange(n_tot), tij].sum(), val)
         except ValueError:  # search of potential maxima ended outside of possible values
             tij_min, tij_max = memory[(i, j)][1].min(), memory[(i, j)][1].max()
             for k in range(j):
@@ -182,11 +184,11 @@ def smart_gsrp(cc, n_ind, n_tot, t_max, tree, program, clean_list, verbose=False
 
         if verbose:
             mem_size = _get_mem_size(memory)
-            tqdm.write(f'TDOA: {tij}, val: {val}, mem size: {mem_size} items, {_get_mem_usage(memory):3.2e} octets,'
-                       f' {100 * mem_size / (n_ind // (i + 1)) / (2 * t_max + 1) ** (i + 1)}%')
+            tqdm.write(f'TDOA: {tij}, val: {20*val:7.3f}dB, mem size: {mem_size} items, {_get_mem_usage(memory):3.2e} octets,'
+                       f' {100 * mem_size / (n_ind // (i + 1)) / (2 * t_max + 1) ** (i + 1):.4}%')
 
         # Mem clean up
         for p in clean_list[i]:
             del memory[p]
 
-    return np.log10(val), tij, 1
+    return val, tij, 1
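
Note (not part of the patch): the change converts the cross-correlation table `cc` to the log10 domain once at the top of `smart_gsrp`, so every accumulation that used to be a product (`prod()`, `*=`) becomes a sum (`sum()`, `+=`), the trailing `np.log10(val)` at the return sites becomes redundant, and the verbose line can report `20 * val` directly as decibels. The sketch below is a minimal illustration of that equivalence on assumed toy data; the variable names are illustrative only, not taken from the module.

```python
import numpy as np

# Minimal sketch (toy data, not part of the patch): products of
# cross-correlation values in the linear domain equal sums in the log10 domain.
rng = np.random.default_rng(0)
cc = rng.random((4, 7))          # stand-in for the cross-correlation table
cc[2, 3] = 0.0                   # a zero entry, to show the -inf handling

# Old behaviour: accumulate a product in the linear domain.
val_lin = cc[:, 0].prod()

# New behaviour: take log10 once, then accumulate a sum. divide='ignore'
# silences the divide-by-zero warning so log10(0) quietly becomes -inf,
# as in smart_gsrp above.
with np.errstate(divide='ignore'):
    log_cc = np.log10(cc)
val_log = log_cc[:, 0].sum()

assert np.isclose(val_log, np.log10(val_lin))

# log10 is strictly increasing, so argmax decisions are unchanged.
vals = rng.random(10)
assert np.argmax(vals) == np.argmax(np.log10(vals))

# The verbose output reports 20 * val as decibels (amplitude convention).
print(f'val: {20 * val_log:7.3f}dB')
```

Because `log10` is strictly increasing, the maximiser selected in `constrained_argmax` is the same in either domain; the `errstate(divide='ignore')` guard only suppresses the warning when a correlation value is exactly zero, which then maps to `-inf` and can never win the `max`/`argmax`.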