Commit 564949c9 authored by ferrari

Increase check for floating point errors

parent 47cc37bf
@@ -208,7 +208,11 @@ def smart_gsrp(cc, n_ind, n_tot, t_max, tree, program, clean_list, verbose=False
                 memory[k] = mask_val(v, out_val - sum(m for tij, m in max_val.items() if curr_tij.isdisjoint(tij)))
         if i and np.prod([v[0].size for v in memory.values()], dtype=np.float64) <= 1024:  # float to prevent overflow
             try:
-                return *add_all(memory, tree, cc, t_max, n_ind, n_tot), 1
+                val, tij = add_all(memory, tree, cc, t_max, n_ind, n_tot)
+                if val > out_val:  # False if val == out_val, or if a floating point error masks potentially good results
+                    return cc[np.arange(n_tot), tij].sum(), tij, 1  # recompute to reduce floating point errors
+                else:
+                    return out_val, tij, 1
             except ValueError as e:
                 if any(v[0].size == 0 for v in memory.values()):  # due to floating point error
                     return out_val, out_tij, 1  # current tij should only contain maxima for this error to occur
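The change guards against a subtle summation issue: the value accumulated term by term inside add_all and the score recomputed in one vectorised pass over cc can disagree in their last bits, so the new check recomputes the sum before returning a supposedly better result. A minimal sketch of that effect, with hypothetical array sizes and variable names not taken from this repository:

import numpy as np

# Hypothetical stand-ins for the score matrix and a candidate assignment.
rng = np.random.default_rng(0)
n_tot, n_cand = 1000, 8
cc = rng.random((n_tot, n_cand))
tij = rng.integers(0, n_cand, size=n_tot)

# One-term-at-a-time accumulation, as an incremental search would build it.
sequential = 0.0
for row, col in enumerate(tij):
    sequential += cc[row, col]

# Single fancy-indexed sum, as in the new return statement; NumPy uses
# pairwise summation here, which rounds differently from the loop above.
vectorised = cc[np.arange(n_tot), tij].sum()

print(sequential == vectorised)           # often False for this many terms
print(abs(sequential - vectorised))       # tiny gap, but enough to flip a strict > comparison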