Skip to content
Snippets Groups Projects
Commit 3815fceb authored by paul.best's avatar paul.best
Browse files

skip k-means

parent 3f55dc05
No related branches found
No related tags found
No related merge requests found
......@@ -4,6 +4,7 @@
*/audio
*/TextGrid
*__pycache__
*log
humpback2/annots
otter/pone.0112562.s003.xlsx
......
......@@ -62,7 +62,7 @@ meta = {
}
}
vgg16 = torchmodels.vgg16(weights=torchmodels.VGG16_Weights.DEFAULT)
vgg16 = torchmodels.vgg16(pretrained=True) # weights=torchmodels.VGG16_Weights.DEFAULT)
vgg16 = vgg16.features[:13]
for nm, mod in vgg16.named_modules():
if isinstance(mod, nn.MaxPool2d):
......@@ -91,7 +91,7 @@ frontend = {
'pcenMel': lambda sr, nfft, sampleDur, n_mel : nn.Sequential(
STFT(nfft, int((sampleDur*sr - nfft)/128)),
MelFilter(sr, nfft, n_mel, sr//nfft, sr//2),
PCENLayer(n_mel),
PCENLayer(n_mel, trainable=False),
u.Croper2D(n_mel, 128)
)
}
......
......@@ -97,7 +97,10 @@ for epoch in range(100_000//len(loader)):
mask = ~df.loc[idxs].label.isna()
clusters, labels = clusters[mask], df.loc[idxs[mask]].label
writer.add_scalar('NMI HDBSCAN', metrics.normalized_mutual_info_score(labels, clusters), step)
writer.add_scalar('ARI HDBSCAN', metrics.adjusted_rand_score(labels, clusters), step)
try:
writer.add_scalar('ARI HDBSCAN', metrics.adjusted_rand_score(labels, clusters), step)
except:
pass
writer.add_scalar('Homogeneity HDBSCAN', metrics.homogeneity_score(labels, clusters), step)
writer.add_scalar('Completeness HDBSCAN', metrics.completeness_score(labels, clusters), step)
writer.add_scalar('V-Measure HDBSCAN', metrics.v_measure_score(labels, clusters), step)
......@@ -112,7 +115,7 @@ for epoch in range(100_000//len(loader)):
writer.add_histogram('HDBSCAN Precisions ', np.array(precs), step)
writer.add_histogram('HDBSCAN Recalls ', np.array(recs), step)
df.drop('cluster', axis=1, inplace=True)
continue
print('\rRunning elbow method for K-Means...', end='')
ks = (5*1.2**np.arange(20)).astype(int)
distorsions = [cluster.KMeans(n_clusters=k).fit(encodings).inertia_ for k in ks]
......@@ -126,7 +129,10 @@ for epoch in range(100_000//len(loader)):
writer.add_scalar('Silhouette', metrics.silhouette_score(encodings, clusters), step)
clusters, labels = clusters[mask], df.loc[idxs[mask]].label
writer.add_scalar('NMI K-Means', metrics.normalized_mutual_info_score(labels, clusters), step)
writer.add_scalar('ARI K-Means', metrics.adjusted_rand_score(labels, clusters), step)
try:
writer.add_scalar('ARI K-Means', metrics.adjusted_rand_score(labels, clusters), step)
except:
pass
writer.add_scalar('Homogeneity K-Means', metrics.homogeneity_score(labels, clusters), step)
writer.add_scalar('Completeness K-Means', metrics.completeness_score(labels, clusters), step)
writer.add_scalar('V-Measure K-Means', metrics.v_measure_score(labels, clusters), step)
......
0% — Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment