diff --git a/run_baseline.py b/run_baseline.py
old mode 100644
new mode 100755
index 7cd80cb4e51bd5495a5d044067fef80b4e0e69bf..1d55f221c7f74699894eb973c53356d81d49d196
--- a/run_baseline.py
+++ b/run_baseline.py
@@ -1,8 +1,9 @@
-from tqdm import tqdm
-from soundsig.sound import Biosound
+import p_tqdm
+from soundsig.sound import BioSound
 import soundfile as sf
+from scipy.signal import resample
 import argparse
-import pandas as pd
+import pandas as pd, numpy as np
 import models
 
 parser = argparse.ArgumentParser()
@@ -15,22 +16,37 @@ def norm(arr):
     return (arr - np.mean(arr) ) / np.std(arr)
 meta = models.meta[args.specie]
 
+feats = ['fund', 'cvfund', 'maxfund', 'minfund', 'meansal', 'meanspect', 'stdspect', 'skewspect',\
+     'kurtosisspect', 'entropyspect', 'q1', 'q2', 'q3', 'meantime', 'stdtime', 'skewtime', 'kurtosistime', 'entropytime']
 
-for idx, row in tqdm(df.iterrows(), total=len(df)):
-    info = sf.info(self.audiopath+row.fn)
+def process(idx):
+    row = df.loc[idx]
+    info = sf.info(f'{args.specie}/audio/{row.fn}')
     dur, fs = info.duration, info.samplerate
     start = int(np.clip(row.pos - meta['sampleDur']/2, 0, max(0, dur - meta['sampleDur'])) * fs)
-    sig, fs = sf.read(self.audiopath+row.fn, start=start, stop=start + int(meta['sampleDur']*fs))
+    sig, fs = sf.read(f'{args.specie}/audio/{row.fn}', start=start, stop=start + int(meta['sampleDur']*fs))
     if sig.ndim == 2:
         sig = sig[:,0]
     if len(sig) < meta['sampleDur'] * fs:
-        sig = np.concatenate([sig, np.zeros(int(self.sampleDur * fs) - len(sig))])
+        sig = np.concatenate([sig, np.zeros(int(meta['sampleDur'] * fs) - len(sig))])
+    if fs != meta['sr']:
+        sig = resample(sig, int(len(sig)/fs*meta['sr']))
-    sound = BioSound(soundWave=norm(sig), fs=fs)
+    sound = BioSound(soundWave=norm(sig), fs=meta['sr'])
     sound.spectroCalc(max_freq=meta['sr']//2)
-    sound.rms = myBioSound.sound.std() 
+    sound.rms = sound.sound.std() 
     sound.ampenv(cutoff_freq = 20, amp_sample_rate = 1000)
-    sound.spectrum(f_high=10000)
-    sound.fundest(maxFund = 1500, minFund = 300, lowFc = 200, highFc = 6000, 
+    sound.spectrum(f_high=meta['sr']//2 - 1)
+    sound.fundest(maxFund = 6000, minFund = 200, lowFc = 200, highFc = 6000, 
                            minSaliency = 0.5, debugFig = 0, 
                            minFormantFreq = 500, maxFormantBW = 500, windowFormant = 0.1,
-                           method='Stack')
\ No newline at end of file
+                           method='Stack')
+    
+    return [sound.__dict__[f] for f in feats]
+
+res = p_tqdm.p_map(process, df.index)
+
+for i, mr in zip(df.index, res):
+    for f, r in zip(feats, mr):
+        df.loc[i, f] = r
+
+df.to_csv(f'{args.specie}/{args.specie}_biosound.csv', index=False)
\ No newline at end of file