diff --git a/new_specie/compute_embeddings.py b/new_specie/compute_embeddings.py
index 512554bd946b5aaeb791108aff27783d97a05404..696b73550e808e2baf1da6c6f7b8064e08c1abbb 100755
--- a/new_specie/compute_embeddings.py
+++ b/new_specie/compute_embeddings.py
@@ -7,7 +7,7 @@ import argparse
 torch.multiprocessing.set_sharing_strategy('file_system')
 
 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Compute the auto-encoder embeddings of vocalizations once it was trained with train_AE.py")
-parser.add_argument('modelname', type=str, help='Filename of the AE weights (.stdc)')
+parser.add_argument('modelname', type=str, help='Filename of the AE weights (.stdc or .weights)')
 parser.add_argument("detections", type=str, help=".csv file with detections to be encoded. Columns filename (path of the soundfile) and pos (center of the detection in seconds) are needed")
 parser.add_argument("-audio_folder", type=str, default='./', help="Folder from which to load sound files")
 parser.add_argument("-NFFT", type=int, default=1024, help="FFT size for the spectrogram computation")
@@ -38,4 +38,4 @@ encodings = np.stack(encodings)
 
 print('Computing UMAP projections...')
 X = umap.UMAP(n_jobs=-1).fit_transform(encodings)
-np.save('encodings_'+args.modelname[:-4]+'npy', {'encodings':encodings, 'idx':idxs, 'umap':X})
+np.save(f'encodings_{args.detections[:-4]}_{args.modelname.split(".")[0]}.npy', {'encodings':encodings, 'idx':idxs, 'umap':X})