Changeset c912c67
- Timestamp: Feb 17, 2006, 6:17:10 PM (19 years ago)
- Branches: feature/autosink, feature/cnn, feature/cnn_org, feature/constantq, feature/crepe, feature/crepe_org, feature/pitchshift, feature/pydocstrings, feature/timestretch, fix/ffmpeg5, master, pitchshift, sampler, timestretch, yinfft
- Children: d998190
- Parents: e968939
- Location: python
- Files: 2 edited
python/aubio/bench/node.py
--- python/aubio/bench/node.py (revision e968939)
+++ python/aubio/bench/node.py (revision c912c67)
@@ -144,18 +144,60 @@
         act_on_results(mkdir,self.datadir,self.resdir,filter='d')
 
-    def pretty_print(self,values,sep='|'):
-        for i in range(len(values)):
-            print self.formats[i] % values[i], sep,
+    def pretty_print(self,sep='|'):
+        for i in self.printnames:
+            print self.formats[i] % self.v[i], sep,
+        print
+
+    def pretty_titles(self,sep='|'):
+        for i in self.printnames:
+            print self.formats[i] % i, sep,
         print
 
     def dir_exec(self):
         """ run file_exec on every input file """
-        pass
+        self.l , self.labs = [], []
+        self.v = {}
+        for i in self.valuenames:
+            self.v[i] = []
+        for i in self.valuelists:
+            self.v[i] = []
+        act_on_files(self.file_exec,self.sndlist,self.reslist, \
+            suffix='',filter=sndfile_filter)
 
     def dir_eval(self):
         pass
 
-    def file_exec(self):
-        pass
+    def file_gettruth(self,input):
+        """ get ground truth filenames """
+        from os.path import isfile
+        ftrulist = []
+        # search for match as filetask.input,".txt"
+        ftru = '.'.join(input.split('.')[:-1])
+        ftru = '.'.join((ftru,'txt'))
+        if isfile(ftru):
+            ftrulist.append(ftru)
+        else:
+            # search for matches for filetask.input in the list of results
+            for i in range(len(self.reslist)):
+                check = '.'.join(self.reslist[i].split('.')[:-1])
+                check = '_'.join(check.split('_')[:-1])
+                if check == '.'.join(input.split('.')[:-1]):
+                    ftrulist.append(self.reslist[i])
+        return ftrulist
+
+    def file_exec(self,input,output):
+        """ create filetask, extract data, evaluate """
+        filetask = self.task(input,params=self.params)
+        computed_data = filetask.compute_all()
+        ftrulist = self.file_gettruth(filetask.input)
+        for i in ftrulist:
+            filetask.eval(computed_data,i,mode='rocloc',vmode='')
+            """ append filetask.v to self.v """
+            for i in self.valuenames:
+                self.v[i].append(filetask.v[i])
+            for j in self.valuelists:
+                if filetask.v[j]:
+                    for i in range(len(filetask.v[j])):
+                        self.v[j].append(filetask.v[j][i])
 
     def file_eval(self):
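In the base-class code added above, file_gettruth resolves the ground-truth annotations for a sound file: it first looks for a '<basename>.txt' file next to the input, and otherwise keeps the result files whose name, stripped of its extension and of a trailing '_<suffix>' part, equals the input's basename. Below is a minimal standalone sketch of that lookup, written in Python 3 for illustration; the function name find_ground_truth and the example file names are invented, only the string handling mirrors the diff.

    from os.path import isfile

    def find_ground_truth(input_file, reslist):
        # prefer a '<basename>.txt' annotation sitting next to the sound file
        base = '.'.join(input_file.split('.')[:-1])
        txt = base + '.txt'
        if isfile(txt):
            return [txt]
        # otherwise keep result files whose '<name>_<suffix>.<ext>' matches the basename
        matches = []
        for res in reslist:
            check = '.'.join(res.split('.')[:-1])
            check = '_'.join(check.split('_')[:-1])
            if check == base:
                matches.append(res)
        return matches

    # hypothetical usage: which results belong to 'train1.wav'?
    print(find_ground_truth('train1.wav', ['train1_0.300.txt', 'other_0.300.txt']))
    # -> ['train1_0.300.txt'] when no 'train1.txt' file exists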
python/test/bench/onset/bench-onset
--- python/test/bench/onset/bench-onset (revision e968939)
+++ python/test/bench/onset/bench-onset (revision c912c67)
@@ -20,87 +20,36 @@
 class benchonset(bench):
 
+    """ list of values to store per file """
     valuenames = ['orig','missed','Tm','expc','bad','Td']
+    """ list of lists to store per file """
     valuelists = ['l','labs']
-    printnames = [ 'mode', 'thres', 'dist', 'prec', 'recl', 'Ttrue', 'Tfp', 'Tfn', 'Tm', 'Td',
-        'aTtrue', 'aTfp', 'aTfn', 'aTm', 'aTd', 'mean', 'smean', 'amean', 'samean']
+    """ list of values to print per dir """
+    printnames = [ 'mode', 'thres', 'dist', 'prec', 'recl',
+        'Ttrue', 'Tfp', 'Tfn', 'Tm', 'Td',
+        'aTtrue', 'aTfp', 'aTfn', 'aTm', 'aTd',
+        'mean', 'smean', 'amean', 'samean']
 
-    formats = {'mode': "%12s" ,
-        'thres': "%5.4s",
-        'dist': "%5.4s",
-        'prec': "%5.4s",
-        'recl': "%5.4s",
-
-        'Ttrue': "%5.4s",
-        'Tfp': "%5.4s",
-        'Tfn': "%5.4s",
-        'Tm': "%5.4s",
-        'Td': "%5.4s",
-
-        'aTtrue':"%5.4s",
-        'aTfp': "%5.4s",
-        'aTfn': "%5.4s",
-        'aTm': "%5.4s",
-        'aTd': "%5.4s",
-
-        'mean': "%5.40s",
-        'smean': "%5.40s",
-        'amean': "%5.40s",
-        'samean': "%5.40s"}
-
-    def file_gettruth(self,input):
-        from os.path import isfile
-        ftrulist = []
-        # search for match as filetask.input,".txt"
-        ftru = '.'.join(input.split('.')[:-1])
-        ftru = '.'.join((ftru,'txt'))
-        if isfile(ftru):
-            ftrulist.append(ftru)
-        else:
-            # search for matches for filetask.input in the list of results
-            for i in range(len(self.reslist)):
-                check = '.'.join(self.reslist[i].split('.')[:-1])
-                check = '_'.join(check.split('_')[:-1])
-                if check == '.'.join(input.split('.')[:-1]):
-                    ftrulist.append(self.reslist[i])
-        return ftrulist
-
-    def file_exec(self,input,output):
-        filetask = self.task(input,params=self.params)
-        computed_data = filetask.compute_all()
-        ftrulist = self.file_gettruth(filetask.input)
-        for i in ftrulist:
-            #print i
-            filetask.eval(computed_data,i,mode='rocloc',vmode='')
-            for i in self.valuenames:
-                self.v[i] += filetask.v[i]
-            for i in filetask.v['l']:
-                self.v['l'].append(i)
-            for i in filetask.v['labs']:
-                self.v['labs'].append(i)
-
-    def dir_exec(self):
-        """ run file_exec on every input file """
-        self.l , self.labs = [], []
-        self.v = {}
-        for i in self.valuenames:
-            self.v[i] = 0.
-        for i in self.valuelists:
-            self.v[i] = []
-        self.v['thres'] = self.params.threshold
-        act_on_files(self.file_exec,self.sndlist,self.reslist, \
-            suffix='',filter=sndfile_filter)
+    """ per dir """
+    formats = {'mode': "%12s" , 'thres': "%5.4s",
+        'dist': "%5.4s", 'prec': "%5.4s", 'recl': "%5.4s",
+        'Ttrue': "%5.4s", 'Tfp': "%5.4s", 'Tfn': "%5.4s",
+        'Tm': "%5.4s", 'Td': "%5.4s",
+        'aTtrue':"%5.4s", 'aTfp': "%5.4s", 'aTfn': "%5.4s",
+        'aTm': "%5.4s", 'aTd': "%5.4s",
+        'mean': "%5.40s", 'smean': "%5.40s",
+        'amean': "%5.40s", 'samean': "%5.40s"}
 
     def dir_eval(self):
-        totaltrue = self.v['expc']-self.v['bad']-self.v['Td']
-        totalfp = self.v['bad']+self.v['Td']
-        totalfn = self.v['missed']+self.v['Tm']
+        """ evaluate statistical data over the directory """
+        totaltrue = sum(self.v['expc'])-sum(self.v['bad'])-sum(self.v['Td'])
+        totalfp = sum(self.v['bad'])+sum(self.v['Td'])
+        totalfn = sum(self.v['missed'])+sum(self.v['Tm'])
         self.P = 100*float(totaltrue)/max(totaltrue + totalfp,1)
         self.R = 100*float(totaltrue)/max(totaltrue + totalfn,1)
         if self.R < 0: self.R = 0
         self.F = 2.* self.P*self.R / max(float(self.P+self.R),1)
-
         N = float(len(self.reslist))
-
         self.v['mode'] = self.params.onsetmode
+        self.v['thres'] = self.params.threshold
         self.v['thres'] = "%2.3f" % self.params.threshold
         self.v['dist'] = "%2.3f" % self.F
@@ -113,6 +62,6 @@
         self.v['aTfp'] = totalfp/N
         self.v['aTfn'] = totalfn/N
-        self.v['aTm'] = self.v['Tm']/N
-        self.v['aTd'] = self.v['Td']/N
+        self.v['aTm'] = sum(self.v['Tm'])/N
+        self.v['aTd'] = sum(self.v['Td'])/N
         self.v['mean'] = mmean(self.v['l'])
         self.v['smean'] = stdev(self.v['l'])
@@ -123,5 +72,4 @@
         self.modes = modes
         self.thresholds = thresholds
-
         self.pretty_titles()
         for mode in self.modes:
@@ -134,14 +82,4 @@
             #print self.v
 
-    def pretty_print(self,sep='|'):
-        for i in self.printnames:
-            print self.formats[i] % self.v[i], sep,
-        print
-
-    def pretty_titles(self,sep='|'):
-        for i in self.printnames:
-            print self.formats[i] % i, sep,
-        print
-
     def auto_learn(self,modes=['dual'],thresholds=[0.1,1.5]):
         """ simple dichotomia like algorithm to optimise threshold """
@@ -149,5 +87,5 @@
         self.pretty_titles()
         for mode in self.modes:
-            steps = 10
+            steps = 11
            lesst = thresholds[0]
            topt = thresholds[1]
@@ -231,5 +169,5 @@
     else: print "ERR: a path is required"; sys.exit(1)
     modes = ['complex', 'energy', 'phase', 'specdiff', 'kl', 'mkl', 'dual']
-    #modes = [ 'phase' ]
+    #modes = [ 'mkl' ]
     thresholds = [ 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2]
     #thresholds = [1.5]
@@ -243,5 +181,4 @@
     benchonset.valuesdict = {}
 
-
     try:
         #benchonset.auto_learn2(modes=modes)
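The reworked dir_eval sums the per-file counts collected by dir_exec and turns them into precision, recall and F-measure percentages, clamping each denominator with max(..., 1) so an empty directory never divides by zero. A small self-contained sketch of that computation, in Python 3; the helper name prf and the sample counts are invented for illustration:

    def prf(totaltrue, totalfp, totalfn):
        # precision, recall and F-measure in percent, guarded as in dir_eval
        P = 100.0 * totaltrue / max(totaltrue + totalfp, 1)
        R = 100.0 * totaltrue / max(totaltrue + totalfn, 1)
        R = max(R, 0)
        F = 2.0 * P * R / max(P + R, 1)
        return P, R, F

    # example: 80 correct detections, 10 false positives, 20 missed onsets
    print(prf(80, 10, 20))  # roughly (88.9, 80.0, 84.2)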