diff --git a/Raw Code/.gitignore b/Code/.gitignore
similarity index 100%
rename from Raw Code/.gitignore
rename to Code/.gitignore
diff --git a/Raw Code/FeatExtraction/ClassifMonoView.py b/Code/FeatExtraction/ClassifMonoView.py
similarity index 100%
rename from Raw Code/FeatExtraction/ClassifMonoView.py
rename to Code/FeatExtraction/ClassifMonoView.py
diff --git a/Raw Code/FeatExtraction/DBCrawl.py b/Code/FeatExtraction/DBCrawl.py
similarity index 100%
rename from Raw Code/FeatExtraction/DBCrawl.py
rename to Code/FeatExtraction/DBCrawl.py
diff --git a/Code/FeatExtraction/ExecClassifMV.py b/Code/FeatExtraction/ExecClassifMV.py
new file mode 100644
index 0000000000000000000000000000000000000000..51663e130a492715e97f75c5581a849883380c4e
--- /dev/null
+++ b/Code/FeatExtraction/ExecClassifMV.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+""" Script to perform feature parameter optimisation """
+
+# Import built-in modules
+#import cv2 # for OpenCV
+#import cv # for OpenCV
+#import datetime # for TimeStamp in CSVFile
+#from scipy.cluster.vq import * # for Clustering http://docs.scipy.org/doc/scipy/reference/cluster.vq.html
+#import numpy as np # for arrays
+#import time # for time calculations
+from argparse import ArgumentParser # for command line arguments
+
+# Import 3rd party modules
+
+# Import own modules
+#import DBCrawl # Functions to read Images from Database
+#import ExportResults # Functions to render results
+#import FeatExtraction # Functions to extract the features from Database
+
+# Author-Info
+__author__ = "Nikolas Huelsmann"
+__status__ = "Development" #Production, Development, Prototype
+__date__ = "2016-03-10"
+
+### Argument Parser
+
+parser = ArgumentParser(description='Perform feature parameter optimisation')
+
+parser.add_argument('-p', '--path', action='store', help='Path to the database', default='D:\\CaltechMini')
+parser.add_argument('-c', '--cores', action='store', type=int, help='Nb cores used for parallelization', default=1)
+
+args = parser.parse_args()
\ No newline at end of file
diff --git a/Code/FeatExtraction/ExecFeatExtraction.py b/Code/FeatExtraction/ExecFeatExtraction.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ac151b3681f29c2f5f6b8e2b5482f963fbb05df
--- /dev/null
+++ b/Code/FeatExtraction/ExecFeatExtraction.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+
+""" Script to perform feature extraction """
+
+# Import built-in modules
+import cv2 # for OpenCV
+import cv # for OpenCV
+import datetime # for TimeStamp in CSVFile
+from scipy.cluster.vq import * # for Clustering http://docs.scipy.org/doc/scipy/reference/cluster.vq.html
+import numpy as np # for arrays
+import time # for time calculations
+from argparse import ArgumentParser # for command line arguments
+
+# Import 3rd party modules
+
+# Import own modules
+import DBCrawl # Functions to read Images from Database
+import ExportResults # Functions to render results
+import FeatExtraction # Functions to extract the features from Database
+
+# Author-Info
+__author__ = "Nikolas Huelsmann"
+__status__ = "Development" #Production, Development, Prototype
+__date__ = "2016-03-10"
+
+### Argument Parser
+
+parser = ArgumentParser(description='Export Features')
+
+parser.add_argument('--name', action='store', help='Name of DB, default DB', default='DB')
+parser.add_argument('--path', action='store', help='Path to the database e.g. D:\\Caltech', default='D:\\CaltechMini')
+parser.add_argument('--cores', action='store', help='Number of cores used for parallelization of HOG, default 1', type=int, default=1)
+
+parser.add_argument('--RGB', action='store_true', help='Use option to activate RGB')
+parser.add_argument('--RGB_Hist', action='store', help='RGB: Number of bins for histogram, default 16', type=int, default=16)
+parser.add_argument('--RGB_CI', action='store', help='RGB: Max Color Intensity [0 to VALUE], default 256', type=int, default=256)
+parser.add_argument('--RGB_NMinMax', action='store_true', help='RGB: Use option to activate MinMax Norm, default distribution')
+
+parser.add_argument('--HSV', action='store_true', help='Use option to activate HSV')
+parser.add_argument('--HSV_H', action='store', help='HSV: Number of bins for Hue, default 8', type=int, default=8)
+parser.add_argument('--HSV_S', action='store', help='HSV: Number of bins for Saturation, default 3', type=int, default=3)
+parser.add_argument('--HSV_V', action='store', help='HSV: Number of bins for Value, default 3', type=int, default=3)
+parser.add_argument('--HSV_NMinMax', action='store_true', help='HSV: Use option to activate MinMax Norm, default distribution')
+
+parser.add_argument('--SIFT', action='store_true', help='Use option to activate SIFT')
+parser.add_argument('--SIFT_Cluster', action='store', help='SIFT: Number of k-means cluster, default 50', type=int, default=50)
+parser.add_argument('--SIFT_NMinMax', action='store_true', help='SIFT: Use option to activate MinMax Norm, default distribution')
+
+parser.add_argument('--SURF', action='store_true', help='Use option to activate SURF')
+parser.add_argument('--SURF_Cluster', action='store', help='SURF: Number of k-means cluster, default 50', type=int, default=50)
+parser.add_argument('--SURF_NMinMax', action='store_true', help='SURF: Use option to activate MinMax Norm, default distribution')
+
+parser.add_argument('--HOG', action='store_true', help='Use option to activate HOG')
+parser.add_argument('--HOG_CellD', action='store', help='HOG: CellDimension for local histograms, default 5', type=int, default=5)
+parser.add_argument('--HOG_Orient', action='store', help='HOG: Number of bins of local histograms, default 8', type=int, default=8)
+parser.add_argument('--HOG_Cluster', action='store', help='HOG: Number of k-means cluster, default 12', type=int, default=12)
+parser.add_argument('--HOG_Iter', action='store', help='HOG: Max. number of iterations for clustering, default 100', type=int, default=100)
+
+
+# CELL_DIMENSION is the dimension of the cells on which we will compute local histograms
+# NB_ORIENTATIONS is the number of bins of these local histograms
+# intuitively, if CELL_DIMENSION is small it's better to have a small NB_ORIENTATIONS in order to have meaningful local histograms
+# NB_CLUSTERS is the number of bins of the global histograms (the number of clusters in the KMEANS algorithm used for the bag of words)
+# MAXITER is the maximum number of iterations for the clustering algorithm
+
+args = parser.parse_args()
+path = args.path
+NB_CORES = args.cores
+nameDB = args.name
+
+### Helper
+
+# Function to transform the boolean decision of norm into a string
+def boolNormToStr(norm):
+    if(norm):
+        return "MinMax"
+    else:
+        return "Distr"
+
+### Main Program
+
+print "### Main Program for Feature Extraction ###"
+features = ""
+if(args.RGB):
+    features = features + "RGB "
+if(args.HSV):
+    features = features + "HSV "
+if(args.SIFT):
+    features = features + "SIFT "
+if(args.SURF):
+    features = features + "SURF "
+if(args.HOG):
+    features = features + "HOG"
+
+print "Infos:\t NameDB=" + nameDB + ", Path=" + path + ", Cores=" + str(NB_CORES) + ", Features=" + features
+
+################################ Read Images from Database
+# Determine the Database to extract features
+
+print "Start:\t Exportation of images from DB"
+
+t_db_start = time.time()
+
+# get dictionary to link classLabels Text to Integers
+sClassLabels = DBCrawl.getClassLabels(path)
+
+# Get all path from all images inclusive classLabel as Integer
+dfImages,nameDB = DBCrawl.imgCrawl(path, sClassLabels, nameDB)
+
+print "Done:\t Exportation of images from DB"
+
+t_db = time.time() - t_db_start
+
+################################ Feature Extraction
+print "Start:\t Features Extraction"
+
+### Setup RGB
+if(args.RGB):
+
+    print "RGB:\t Start"
+    t_rgb_start = time.time()
+
+    numberOfBins = args.RGB_Hist
+    maxColorIntensity = args.RGB_CI
+    boolNormMinMax = args.RGB_NMinMax
+
+    # Infos
+    print "RGB:\t NumberOfBins=" + str(numberOfBins) + ", MaxColorIntensity=" + str(maxColorIntensity) + ", Norm=" + boolNormToStr(boolNormMinMax)
+
+    # Extract Feature from DB
+    rgb_feat_desc,rgb_f_extr_res = FeatExtraction.calcRGBColorHisto(nameDB, dfImages, numberOfBins, maxColorIntensity, boolNormMinMax)
+
+    t_rgb = time.time() - t_rgb_start
+    print "RGB:\t Done in: " + str(t_rgb) + "[s]"
+
+
+### Setup HSV
+if(args.HSV):
+    print "HSV:\t Start"
+    t_hsv_start = time.time()
+
+    h_bins = args.HSV_H
+    s_bins = args.HSV_S
+    v_bins = args.HSV_V
+    histSize = [h_bins, s_bins, v_bins]
+    boolNormMinMax = args.HSV_NMinMax
+
+    # Infos
+    print "HSV:\t HSVBins=[" + str(h_bins) + "," + str(s_bins) + "," + str(v_bins) + "], Norm=" + boolNormToStr(boolNormMinMax)
+
+    # Extract Feature from DB
+    hsv_feat_desc,hsv_f_extr_res = FeatExtraction.calcHSVColorHisto(nameDB, dfImages, histSize, boolNormMinMax)
+    t_hsv = time.time() - t_hsv_start
+    print "HSV:\t Done in: " + str(t_hsv) + "[s]"
+
+
+
+### Setup SIFT
+if(args.SIFT):
+    print "SIFT:\t Start"
+    t_sift_start = time.time()
+
+    boolSIFT = True
+    cluster = args.SIFT_Cluster
+    boolNormMinMax = args.SIFT_NMinMax
+
+    print "SIFT:\t Cluster=" + str(cluster) + ", Norm=" + boolNormToStr(boolNormMinMax)
+
+    sift_descriptors,sift_des_list = FeatExtraction.calcSURFSIFTDescriptors(dfImages, boolSIFT)
+    sift_feat_desc,sift_f_extr_res = FeatExtraction.calcSURFSIFTHisto(nameDB, dfImages, cluster, boolNormMinMax, sift_descriptors, sift_des_list, boolSIFT)
+    t_sift = time.time() - t_sift_start
+    print "SIFT:\t Done in: " + str(t_sift) + "[s]"
+
+
+### Setup SURF
+if(args.SURF):
+    print "SURF:\t Start"
+    t_surf_start = time.time()
+
+    boolSIFT = False
+    cluster = args.SURF_Cluster
+    boolNormMinMax = args.SURF_NMinMax
+
+    print "SURF:\t Cluster=" + str(cluster) + ", Norm=" + boolNormToStr(boolNormMinMax)
+
+    # Extract Feature from DB
+    surf_descriptors,surf_des_list = FeatExtraction.calcSURFSIFTDescriptors(dfImages, boolSIFT)
+    surf_feat_desc,surf_f_extr_res = FeatExtraction.calcSURFSIFTHisto(nameDB, dfImages, cluster, boolNormMinMax, surf_descriptors, surf_des_list, boolSIFT)
+    t_surf = time.time() - t_surf_start
+    print "SURF:\t Done in: " + str(t_surf) + "[s]"
+
+### Setup HOG
+if(args.HOG):
+    print "HOG:\t Start"
+    t_hog_start = time.time()
+
+    CELL_DIMENSION = args.HOG_CellD
+    NB_ORIENTATIONS = args.HOG_Orient
+    NB_CLUSTERS = args.HOG_Cluster
+    MAXITER = args.HOG_Iter
+
+    print "HOG:\t CellDim=" + str(CELL_DIMENSION) + ", NbOrientations=" + str(NB_ORIENTATIONS) + ", Cluster=" + str(NB_CLUSTERS) + ", MaxIter=" + str(MAXITER)
+
+    # Extract Feature from DB
+    hog_feat_desc,hog_f_extr_res = FeatExtraction.calcHOGParallel(nameDB, dfImages.values, CELL_DIMENSION, NB_ORIENTATIONS, NB_CLUSTERS, MAXITER, NB_CORES)
+    #hog_feat_desc,hog_f_extr_res = FeatExtraction.calcHOG(nameDB, dfImages.values, CELL_DIMENSION, NB_ORIENTATIONS, NB_CLUSTERS, MAXITER)
+    t_hog = time.time() - t_hog_start
+    print "HOG:\t Done in: " + str(t_hog) + "[s]"
+
+print "Done:\t Features Extraction"
+
+
+################################ SAVE TO FEATURES DATABASES
+print "Start:\t Save Features to CSV Databases"
+
+### Classlabels and Description
+OutputfileNameClassLabels = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + nameDB + "-ClassLabels"
+ExportResults.exportNumpyToCSV(dfImages.classLabel, OutputfileNameClassLabels, '%i')
+
+fileNameClassLabels = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + nameDB + "-ClassLabels-Description"
+ExportResults.exportPandasToCSV(sClassLabels, fileNameClassLabels)
+
+format = '%1.30f'
+### RGB
+if(args.RGB):
+    fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + rgb_feat_desc
+    ExportResults.exportNumpyToCSV(rgb_f_extr_res, fileName, format)
+
+
+### HSV
+if(args.HSV):
+    fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + hsv_feat_desc
+    ExportResults.exportNumpyToCSV(hsv_f_extr_res, fileName, format)
+
+### SIFT
+if(args.SIFT):
+    fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + sift_feat_desc
+    ExportResults.exportNumpyToCSV(sift_f_extr_res, fileName, format)
+
+### SURF
+if(args.SURF):
+    fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + surf_feat_desc
+    ExportResults.exportNumpyToCSV(surf_f_extr_res, fileName, format)
+
+### HOG
+if(args.HOG):
+    fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + hog_feat_desc
+    ExportResults.exportNumpyToCSV(hog_f_extr_res, fileName, format)
+
+print "Done:\t Save Features to CSV Databases"
\ No newline at end of file
diff --git a/Raw Code/FeatExtraction/ExecFeatParaOpt.py b/Code/FeatExtraction/ExecFeatParaOpt.py
similarity index 91%
rename from Raw Code/FeatExtraction/ExecFeatParaOpt.py
rename to Code/FeatExtraction/ExecFeatParaOpt.py
index 6f094d54d042ebb8a79246fb6fba66bc46484a06..2211c07ae8b9e458c3b41fe48eaa36375ee3a0ee 100644
--- a/Raw Code/FeatExtraction/ExecFeatParaOpt.py
+++ b/Code/FeatExtraction/ExecFeatParaOpt.py
@@ -3,11 +3,12 @@
 """ Script to perform feature parameter optimisation """
 
 # Import built-in modules
-import cv2 # for OpenCV
-import cv # for OpenCV
-import datetime # for TimeStamp in CSVFile
-from scipy.cluster.vq import * # for Clustering http://docs.scipy.org/doc/scipy/reference/cluster.vq.html
-import numpy as np # for arrays
+import cv2 # for OpenCV
+import cv # for OpenCV
+import datetime # for TimeStamp in CSVFile
+from scipy.cluster.vq import * # for Clustering http://docs.scipy.org/doc/scipy/reference/cluster.vq.html
+import numpy as np # for arrays
+from argparse import ArgumentParser # for command line arguments
 
 # Import sci-kit learn
 from sklearn.ensemble import RandomForestClassifier
diff --git a/Raw Code/FeatExtraction/ExportResults.py b/Code/FeatExtraction/ExportResults.py
similarity index 98%
rename from Raw Code/FeatExtraction/ExportResults.py
rename to Code/FeatExtraction/ExportResults.py
index 70090f0ae03334f3b18d64944c9eb56fd8b7cd87..f1a1209680469b4af8a432099d543bf1982b1dd8 100644
--- a/Raw Code/FeatExtraction/ExportResults.py
+++ b/Code/FeatExtraction/ExportResults.py
@@ -35,18 +35,18 @@ def exportPandasToCSV(pandasSorDF, filename):
     pandasSorDF.to_csv(filename + ".csv", sep=';', decimal=',')
 
 
-def exportNumpyToCSV(numpyArray, filename):
+def exportNumpyToCSV(numpyArray, filename, format):
     path = os.getcwdu() + "\\" + filename
 
     if os.path.isfile(path + ".csv"):
         for i in range(1,20):
             testFileName = filename + "-" + str(i) + ".csv"
             if os.path.isfile(os.getcwdu() + "\\" + testFileName)!=True:
-                np.savetxt(testFileName, numpyArray, delimiter=",", fmt='%1.30f')
+                np.savetxt(testFileName, numpyArray, delimiter=";", fmt=format)
                 break
 
     else:
-        np.savetxt(filename + ".csv", numpyArray, delimiter=";", fmt='%1.30f')
+        np.savetxt(filename + ".csv", numpyArray, delimiter=";", fmt=format)
 
 
 #### Rendering of results
diff --git a/Raw Code/FeatExtraction/FeatExtraction.py b/Code/FeatExtraction/FeatExtraction.py
similarity index 100%
rename from Raw Code/FeatExtraction/FeatExtraction.py
rename to Code/FeatExtraction/FeatExtraction.py
diff --git a/Raw Code/FeatExtraction/FeatParaOpt.py b/Code/FeatExtraction/FeatParaOpt.py
similarity index 100%
rename from Raw Code/FeatExtraction/FeatParaOpt.py
rename to Code/FeatExtraction/FeatParaOpt.py
diff --git a/Raw Code/FeatExtraction/OLD/image_size.py b/Code/FeatExtraction/OLD/image_size.py
similarity index 100%
rename from Raw Code/FeatExtraction/OLD/image_size.py
rename to Code/FeatExtraction/OLD/image_size.py
diff --git a/Raw Code/FeatExtraction/OLD/testImage.jpg b/Code/FeatExtraction/OLD/testImage.jpg
similarity index 100%
rename from Raw Code/FeatExtraction/OLD/testImage.jpg
rename to Code/FeatExtraction/OLD/testImage.jpg
diff --git a/Raw Code/FeatExtraction/hog_extraction.py b/Code/FeatExtraction/hog_extraction.py
similarity index 100%
rename from Raw Code/FeatExtraction/hog_extraction.py
rename to Code/FeatExtraction/hog_extraction.py
diff --git a/Raw Code/FeatExtraction/hog_extraction_parallelized.py b/Code/FeatExtraction/hog_extraction_parallelized.py
similarity index 100%
rename from Raw Code/FeatExtraction/hog_extraction_parallelized.py
rename to Code/FeatExtraction/hog_extraction_parallelized.py
diff --git a/Raw Code/Fusion/EarlyFusion.py b/Code/Fusion/EarlyFusion.py
similarity index 100%
rename from Raw Code/Fusion/EarlyFusion.py
rename to Code/Fusion/EarlyFusion.py
diff --git a/Raw Code/Fusion/LateFusion.py b/Code/Fusion/LateFusion.py
similarity index 100%
rename from Raw Code/Fusion/LateFusion.py
rename to Code/Fusion/LateFusion.py
diff --git a/Raw Code/FeatExtraction/ExecFeatExtraction.py b/Raw Code/FeatExtraction/ExecFeatExtraction.py
deleted file mode 100644
index 71a9f8c451c2d0df7503ba8eb4652bae7e0095a3..0000000000000000000000000000000000000000
--- a/Raw Code/FeatExtraction/ExecFeatExtraction.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/env python
-
-""" Script to perform feature parameter optimisation """
-
-# Import built-in modules
-import cv2 # for OpenCV
-import cv # for OpenCV
-import datetime # for TimeStamp in CSVFile
-from scipy.cluster.vq import * # for Clustering http://docs.scipy.org/doc/scipy/reference/cluster.vq.html
-import numpy as np # for arrays
-import time # for time calculations
-from argparse import ArgumentParser # for acommand line arguments
-
-# Import 3rd party modules
-
-# Import own modules
-import DBCrawl # Functions to read Images from Database
-import ExportResults # Functions to render results
-import FeatExtraction # Functions to extract the features from Database
-
-# Author-Info
-__author__ = "Nikolas Huelsmann"
-__status__ = "Development" #Production, Development, Prototype
-__date__ = 2016-02-04
-
-### Argument Parser
-
-parser = ArgumentParser(description='Perform feature parameter optimisation')
-
-parser.add_argument('-p', '--path', action='store', help='Path to the database', default='D:\\CaltechMini')
-parser.add_argument('-c', '--cores', action='store', type=int, help='Nb cores used for parallelization', default=1)
-
-args = parser.parse_args()
-
-path = args.path
-NB_CORES = args.cores
-
-### Main Programm
-
-################################ Read Images from Database
-# Determine the Database to extract features
-
-print "### Main Programm for Feature Extraction ###"
-# path ="D:\\CaltechMini"
-path = args.path
-nameDB = "CT-Mini"
-
-print "Start:\t Exportation of images from DB"
-
-t_db_start = time.time()
-
-# get dictionary to link classLabels Text to Integers
-sClassLabels = DBCrawl.getClassLabels(path)
-
-# Get all path from all images inclusive classLabel as Integer
-dfImages,nameDB = DBCrawl.imgCrawl(path, sClassLabels, nameDB)
-
-print "Done:\t Exportation of images from DB"
-
-t_db = t_db_start - time.time()
-
-################################ Feature Extraction
-print "Start:\t Features Extraction"
-
-### Setup RGB
-t_rgb_start = time.time()
-print "RGB:\t Start"
-
-numberOfBins = 16
-maxColorIntensity = 256
-boolNormMinMax = False
-
-# Extract Feature from DB
-rgb_feat_desc,rgb_f_extr_res = FeatExtraction.calcRGBColorHisto(nameDB, dfImages, numberOfBins, maxColorIntensity, boolNormMinMax)
-
-t_rgb = time.time() - t_rgb_start
-print "RGB:\t Done in: " + str(t_rgb) + "[s]"
-
-
-### Setup HSV
-t_hsv_start = time.time()
-print "HSV:\t Start"
-h_bins = 8
-s_bins = 3
-v_bins = 3
-histSize = [h_bins, s_bins, v_bins]
-boolNormMinMax = False
-
-# Extract Feature from DB
-hsv_feat_desc,hsv_f_extr_res = FeatExtraction.calcHSVColorHisto(nameDB, dfImages, histSize, boolNormMinMax)
-t_hsv = time.time() - t_hsv_start
-print "HSV:\t Done in: " + str(t_hsv) + "[s]"
-
-
-
-### Setup SIFT
-t_sift_start = time.time()
-print "SIFT:\t Start"
-boolSIFT = True
-cluster = 50
-boolNormMinMax = False
-
-sift_descriptors,sift_des_list = FeatExtraction.calcSURFSIFTDescriptors(dfImages, boolSIFT)
-sift_feat_desc,sift_f_extr_res = FeatExtraction.calcSURFSIFTHisto(nameDB, dfImages, cluster, boolNormMinMax, sift_descriptors, sift_des_list, boolSIFT)
-t_sift = time.time() - t_sift_start
-print "SIFT:\t Done in: " + str(t_sift) + "[s]"
-
-
-### Setup SURF
-t_surf_start = time.time()
-print "SURF:\t Start"
-boolSIFT = False
-cluster = 50
-boolNormMinMax = False
-
-# Extract Feature from DB
-surf_descriptors,surf_des_list = FeatExtraction.calcSURFSIFTDescriptors(dfImages, boolSIFT)
-surf_feat_desc,surf_f_extr_res = FeatExtraction.calcSURFSIFTHisto(nameDB, dfImages, cluster, boolNormMinMax, surf_descriptors, surf_des_list, boolSIFT)
-t_surf = time.time() - t_surf_start
-print "SURF:\t Done in: " + str(t_surf) + "[s]"
-
-### Setup HOG
-t_hog_start = time.time()
-print "HOG:\t Start"
-CELL_DIMENSION = 5
-NB_ORIENTATIONS = 8
-NB_CLUSTERS = 12
-MAXITER = 100
-
-# Extract Feature from DB
-hog_feat_desc,hog_f_extr_res = FeatExtraction.calcHOGParallel(nameDB, dfImages.values, CELL_DIMENSION, NB_ORIENTATIONS, NB_CLUSTERS, MAXITER, NB_CORES)
-#hog_feat_desc,hog_f_extr_res = FeatExtraction.calcHOG(nameDB, dfImages.values, CELL_DIMENSION, NB_ORIENTATIONS, NB_CLUSTERS, MAXITER)
-t_hog = time.time() - t_hog_start
-print "HOG:\t Done in: " + str(t_hog) + "[s]"
-
-print "Done:\t Features Extraction"
-
-
-################################ SAVE TO FEATURES DATABASES
-print "Start:\t Save Features to CSV Databases"
-
-### Classlabels and Description
-OutputfileNameClassLabels = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + nameDB + "-ClassLabels"
-ExportResults.exportNumpyToCSV(dfImages.classLabel, OutputfileNameClassLabels)
-
-fileNameClassLabels = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + nameDB + "-ClassLabels-Description"
-ExportResults.exportPandasToCSV(sClassLabels, fileNameClassLabels)
-
-### RGB
-fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + rgb_feat_desc
-ExportResults.exportNumpyToCSV(rgb_f_extr_res, fileName)
-
-### HSV
-fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + hsv_feat_desc
-ExportResults.exportNumpyToCSV(hsv_f_extr_res, fileName)
-
-### SIFT
-fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + sift_feat_desc
-ExportResults.exportNumpyToCSV(sift_f_extr_res, fileName)
-
-### SURF
-fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + surf_feat_desc
-ExportResults.exportNumpyToCSV(surf_f_extr_res, fileName)
-
-### HOG
-fileName = datetime.datetime.now().strftime("%Y_%m_%d") + "-" + hog_feat_desc
-ExportResults.exportNumpyToCSV(hog_f_extr_res, fileName)
-
-print "Done:\t Save Features to CSV Databases"
\ No newline at end of file
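
Usage sketch (not part of the patch): one possible invocation of the new Code/FeatExtraction/ExecFeatExtraction.py, using only the options defined in its argument parser; the database name and Windows path are illustrative placeholders taken from defaults found elsewhere in the repository.

    python ExecFeatExtraction.py --name CT-Mini --path D:\CaltechMini --cores 4 --RGB --RGB_Hist 16 --HOG --HOG_CellD 5 --HOG_Orient 8 --HOG_Cluster 12 --HOG_Iter 100

Only the feature families whose flags are set (here RGB and HOG) are extracted and written to the dated CSV files.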