
Source Code for Module qubx.optimize

"""Globally available pluggable optimizer framework.

Copyright 2008-2012 Research Foundation State University of New York
This file is part of QUB Express.

QUB Express is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

QUB Express is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License,
named LICENSE.txt, in the QUB Express program directory.  If not, see
<http://www.gnu.org/licenses/>.

"""

import numpy
import random
import scipy
import scipy.optimize
import traceback
import qubx.fast.fit
import qubx.settings
import qubx.tree
import qubx.util_types

# Example settings tree for an optimizer profile such as "mil_optimize":
#   Method = Simplex
#   Simplex
#      max iterations = 100
#   Methods
#      Simplex = 2
#      DFP = 8

# Callback signatures expected throughout this module:
# func = lambda pars: score
# on_iter = lambda pars, grads, iterations, score: None
# grad_func = (lambda pars, grads, dx: score) or None
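
# A minimal sketch of these callbacks (names hypothetical, illustration only):
#
#   def example_func(pars):                        # score to be minimized
#       return sum((p - 1.0)**2 for p in pars)
#   def example_on_iter(pars, grads, iterations, score):
#       print("iter %d: score = %g" % (iterations, score))
#
# e.g. optimize([0.0, 0.0], example_func, example_on_iter)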

g_optimizers = [] # [ {'name' : "",
                  #    'optimize' : lambda,
                  #    'defaults' : {'opt name' : val},
                  #    'priority' : int in range(0,11) -- 0 for disabled, 1 earlier, 10 later} ]
g_opt_ix = {}
OnOptimizerAdded = qubx.util_types.WeakEvent() # (name, rec)
OnOptOptions = qubx.util_types.WeakEvent() # (name) when options changed

def add_opt(name, optimize, defaults, priority=5):
    if name in g_opt_ix:
        rec = g_optimizers[g_opt_ix[name]]
    else:
        g_opt_ix[name] = len(g_optimizers)
        rec = {}
        g_optimizers.append(rec)
    rec['name'] = name
    rec['optimize'] = optimize
    rec['defaults'] = defaults
    rec['priority'] = priority
    OnOptimizerAdded(name, rec)
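
# For illustration: a plug-in registers itself much like the built-in methods
# below ('my_optimize' is hypothetical; it must return the 5-tuple
# (score, pars, iterations, grads, Hessian), as null_optimize does):
#
#   add_opt('MyMethod', my_optimize, {'max iterations' : 50}, priority=5)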

def del_opt(name):
    if name in g_opt_ix:
        ix = g_opt_ix[name]
        del g_optimizers[ix]
        del g_opt_ix[name]
        for i in xrange(ix, len(g_optimizers)):
            g_opt_ix[g_optimizers[i]['name']] = i

def setup_opt(profile, defaults={}, method=None):
    qubx.settings.InitSettings() # just in case
    pro = qubx.settings.SettingsMgr[profile].active
    meth = str(pro.find('Method').data) if (method is None) else method
    if not (meth in g_opt_ix):
        if ('Method' in defaults) and (defaults['Method'] in g_opt_ix):
            meth = defaults['Method']
        else:
            meth = g_optimizers[0]['name']
        pro['Method'].data = meth
    optrec = g_optimizers[g_opt_ix[meth]]
    for opt in g_optimizers:
        mpro = pro[opt['name']]
        sysdef = opt['defaults']
        for k in sysdef:
            if not mpro[k].data:
                if k in defaults:
                    mpro[k].data = defaults[k]
                else:
                    mpro[k].data = sysdef[k]
    return optrec

def setup_opts(profile, defaults={}):
    qubx.settings.InitSettings()
    pro = qubx.settings.SettingsMgr[profile].active
    methods = pro['Methods']
    opts = []
    for opt in g_optimizers:
        optname = opt['name']
        priority = qubx.tree.node_data_or_set_def(methods, optname, opt['priority'])
        if priority:
            mpro = pro[optname]
            sysdef = opt['defaults']
            for k in sysdef:
                if not mpro[k].data:
                    if k in defaults:
                        mpro[k].data = defaults[k]
                    else:
                        mpro[k].data = sysdef[k]
            opts.append( (priority, optname, opt, mpro) )
    opts.sort()
    return opts

def set_opt_priority(profile, method, priority):
    qubx.settings.InitSettings()
    pro = qubx.settings.SettingsMgr[profile].active
    pro['Methods'][method].data = priority
    OnOptOptions(method)

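# For example (values illustrative): in multi-method runs, priority 0 disables
# a method and lower nonzero priorities run earlier:
#
#   set_opt_priority("default_optimize", 'Simplex', 0)  # skip Simplex
#   set_opt_priority("default_optimize", 'DFP', 1)      # run DFP first
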
def node_pydata(node):
    if node.data.type == qubx.tree.QTR_TYPE_STRING:
        return str(node.data)
    elif node.data.count > 1:
        return node.data[:]
    else:
        return node.data[0]

def get_options(profile="default_optimize", options={}, method=None):
    rec = setup_opt(profile, method=method)
    pro = qubx.settings.SettingsMgr[profile].active
    meth = str(pro['Method'].data) if (method is None) else method
    pro = pro[meth]
    opts = {}
    for k in rec['defaults']:
        if k in options:
            opts[k] = options[k]
        else:
            opts[k] = node_pydata(pro[k])
    return opts

def set_options(profile="default_optimize", options={}, method=None):
    rec = setup_opt(profile)
    pro = qubx.settings.SettingsMgr[profile].active
    if not (method is None):
        meth = method
    elif 'Method' in options:
        meth = options['Method']
    else:
        meth = str(pro['Method'].data)
    pro = pro[meth]
    for k in rec['defaults']:
        if k in options:
            pro[k].data = options[k]
    OnOptOptions(meth)

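# A sketch of reading and writing per-method options (assumes the 'DFP'
# method registered below; values illustrative):
#
#   opts = get_options("default_optimize", method='DFP')
#   # e.g. opts == {'max iterations': 100, 'conv LL': 1e-6, ...}
#   set_options("default_optimize", {'max iterations' : 200}, method='DFP')
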
def get_opt(profile="default_optimize", options={}):
    rec = setup_opt(profile)
    optf = rec['optimize']
    opts = get_options(profile, options)
    def optimize(pars, func, on_iter, grad_func=None):
        restarts = 1
        if ('max restarts' in opts) and (restarts < opts['max restarts']):
            restarts += opts['max restarts']
        while restarts:
            restarts -= 1
            score, pars, iterations, grads, Hessian = optf(opts, pars, func, on_iter, grad_func)
            if ('max iterations' in opts) and (iterations < opts['max iterations']):
                break
        return score, pars, iterations, grads, Hessian
    return optimize

def get_opts(profile="default_optimize", options={}, on_start=lambda rec, opts: None, on_end=lambda rec, opts: None):
    def optimize(pars, func, on_iter, grad_func=None):
        score, iterations, grads, Hessian = 0.0, 0, numpy.array([0.0]*len(pars)), None
        for priority, optname, rec, opts in setup_opts(profile):
            optf = rec['optimize']
            opts = get_options(profile, options, method=optname)
            restarts = 1
            if ('max restarts' in opts) and (restarts < opts['max restarts']):
                restarts += opts['max restarts']
            on_start(rec, opts)
            while restarts:
                restarts -= 1
                score, pars, iterations, grads, Hessian = optf(opts, pars, func, on_iter, grad_func)
                if ('max iterations' in opts) and (iterations < opts['max iterations']):
                    break
            on_end(rec, opts)
        return score, pars, iterations, grads, Hessian
    return optimize

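# A hypothetical multi-method pass: get_opts runs every method with nonzero
# priority, in priority order, threading the parameters through each one:
#
#   run_all = get_opts("default_optimize")
#   score, pars, iterations, grads, Hessian = run_all(
#       numpy.array([0.0]), lambda p: (p[0] - 2.0)**2,
#       lambda pars, grads, iterations, score: None)
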
def optimize(pars, func, on_iter, grad_func=None, profile="default_optimize", options={}, multi_method=False):
    return (multi_method and get_opts or get_opt)(profile, options)(numpy.array(pars), func, on_iter, grad_func)

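# A minimal end-to-end sketch: minimize (x-3)^2 + (y+1)^2 with whichever
# Method the "default_optimize" profile selects (names hypothetical):
#
#   def quad(pars):
#       return (pars[0] - 3.0)**2 + (pars[1] + 1.0)**2
#   def report(pars, grads, iterations, score):
#       print("iter %d: score = %g" % (iterations, score))
#   score, pars, iterations, grads, Hessian = optimize([0.0, 0.0], quad, report)
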
def null_optimize(options, pars, func, on_iter, grad_func=None):
    # if options["whatever"]:
    grads = numpy.zeros(shape=pars.shape, dtype=pars.dtype)
    Hessian = numpy.zeros(shape=(len(pars),len(pars)), dtype=pars.dtype)
    if grad_func:
        score = grad_func(pars, grads)
    else:
        score = func(pars)
    iterations = 1
    on_iter(pars, grads, iterations, score)
    return score, pars, iterations, grads, Hessian

# add_opt("Null", null_optimize, {'whatever': True})

def num_grad(func, nx, dx=1e-6):
    def grad_func(pars, grads, dx_specific=None):
        d = dx_specific or dx
        score = func(pars)
        xpars = numpy.array(pars[:nx], dtype='float64', copy=True)
        for ix in xrange(nx):
            xpars[:nx] = pars[:nx]
            xpars[ix] += d
            xscore = func(xpars)
            grads[ix] = (xscore - score) / d
            # print ix, d, score, xscore, grads[ix]
        return score
    return grad_func

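# A sketch of num_grad by itself (values illustrative): the returned closure
# fills grads[] with forward differences and returns the score at pars.
#
#   gradf = num_grad(lambda p: p[0]**2 + p[1]**2, 2)
#   grads = numpy.zeros(2)
#   score = gradf(numpy.array([1.0, 2.0]), grads)  # score ~ 5.0, grads ~ [2, 4]
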
def dfp_optimize(options, pars, func, on_iter, grad_func=None):
    nx = len(pars)
    dx = options['num grad dx']
    gradf = grad_func or num_grad(func, nx, dx)
    interrupted = [False]
    out_iterations = [0]
    out_pars = numpy.array(pars)
    perturb = options['perturb']
    if perturb:
        for i,p in enumerate(pars):
            pars[i] = p*(1.0 + perturb*(random.random() - .5))
    def dfp_func(pars, grads, do_grads):
        try:
            if do_grads:
                return gradf(pars[:nx], grads, dx)
            else:
                return func(pars[:nx])
        except:
            traceback.print_exc()
            raise
    def dfp_iter(iterations, nf, ndf, ll, pars, grads):
        try:
            out_iterations[0] = iterations
            out_pars[:] = pars[:nx]
            on_iter(pars[:nx], grads[:nx], iterations, ll)
            return 0
        except:
            traceback.print_exc()
            return -1

    minval, errno, grads, Hessian = qubx.fast.fit.dfpmin(pars, dfp_func, dfp_iter, options['max iterations'],
                                                         options['conv LL'], options['conv grad'], options['max step'])
    return minval, out_pars, out_iterations[0], grads, Hessian

add_opt('DFP', dfp_optimize,
        {'num grad dx' : 1e-4,
         'max iterations' : 100,
         'max restarts' : 0,
         'conv LL' : 1e-6,
         'conv grad' : 1e-6,
         'max step' : 1e-1,
         'perturb' : 1e-2},
        priority=8)


def bfgs_optimize(options, pars, func, on_iter, grad_func=None):
    nx = len(pars)
    dx = options['num grad dx']
    gradf = grad_func or num_grad(func, nx, dx)
    grads = numpy.zeros(shape=pars.shape, dtype=pars.dtype)
    Hessian = numpy.zeros(shape=(len(pars),len(pars)), dtype=pars.dtype)
    iters = [0]
    score_of_pars = {}

    def bfgs_func(pars):
        score = func(pars)
        score_of_pars[tuple(pars[:len(grads)])] = score
        return score
    def bfgs_grad_func(pars):
        score = gradf(pars, grads, dx)
        return grads
    def bfgs_iter(pars):
        iters[0] += 1
        on_iter(pars, grads, iters[0], score_of_pars[tuple(pars[:len(grads)])])

    result = scipy.optimize.fmin_bfgs(bfgs_func, pars, bfgs_grad_func, gtol=options['conv grad'],
                                      maxiter=options['max iterations'],
                                      full_output=1, disp=1, retall=0, callback=bfgs_iter)
    pars, score, grads, Hessian, nf, ng, warnflag = result
    return score, pars, iters[0], grads, Hessian


add_opt('BFGS', bfgs_optimize,
        {'num grad dx' : 1e-4,
         'max iterations' : 100,
         'conv LL' : 1e-6,
         'conv grad' : 1e-6},
        priority=0)


def simplex_optimize(options, pars, func, on_iter, grad_func=None):
    grads = numpy.zeros(shape=pars.shape, dtype=pars.dtype)
    Hessian = numpy.zeros(shape=(len(pars),len(pars)), dtype=pars.dtype)
    iters = [0]
    score_of_pars = {}

    def simplex_func(pars):
        score = func(pars)
        score_of_pars[tuple(pars[:len(grads)])] = score
        return score
    def simplex_iter(pars):
        iters[0] += 1
        on_iter(pars, grads, iters[0], score_of_pars[tuple(pars[:len(grads)])])

    pars, score, iterations, nf, warnflag = \
        scipy.optimize.fmin(simplex_func, pars, maxfun=options['max f calls'],
                            xtol=options['xtol'], ftol=options['ftol'], maxiter=options['max iterations'],
                            full_output=1, disp=1, retall=0, callback=simplex_iter)
    return score, pars, iterations, grads, Hessian

add_opt('Simplex', simplex_optimize,
        {'max f calls' : 400,
         'max iterations' : 100,
         'xtol' : 1e-6,
         'ftol' : 1e-6},
        priority=2)


#def simplex_dfp_optimize(options, pars, func, on_iter, grad_func=None):
#    options['max iterations'] = options['simplex iterations']
#    options['max f calls'] = options['simplex f calls']
#    score, pars, iterations, grads, Hessian = simplex_optimize(options, pars, func, on_iter, grad_func)
#    options['max iterations'] = options['dfp iterations']
#    options['max restarts'] = options['dfp restarts']
#    return dfp_optimize(options, pars, func, on_iter, grad_func)
#
#add_opt('Simplex-DFP', simplex_dfp_optimize,
#        {'simplex f calls' : 400,
#         'simplex iterations' : 100,
#         'xtol' : 1e-6,
#         'ftol' : 1e-6,
#         'num grad dx' : 1e-4,
#         'dfp iterations' : 100,
#         'dfp restarts' : 0,
#         'conv LL' : 1e-6,
#         'conv grad' : 1e-6,
#         'max step' : 1e-1},
#        priority=0)
317 def simplex_iter(pars): 318 iters[0] += 1 319 on_iter(pars, grads, iters[0], score_of_pars[tuple(pars[:len(grads)])]) 320 321 pars, score, iterations, nf, warnflag = \ 322 scipy.optimize.fmin(simplex_func, pars, maxfun=options['max f calls'], 323 xtol=options['xtol'], ftol=options['ftol'], maxiter=options['max iterations'], 324 full_output=1, disp=1, retall=0, callback=simplex_iter) 325 return score, pars, iterations, grads, Hessian 326 327 add_opt('Simplex', simplex_optimize, 328 {'max f calls' : 400, 329 'max iterations' : 100, 330 'xtol' : 1e-6, 331 'ftol' : 1e-6}, 332 priority=2) 333 334 335 #def simplex_dfp_optimize(options, pars, func, on_iter, grad_func=None): 336 # options['max iterations'] = options['simplex iterations'] 337 # options['max f calls'] = options['simplex f calls'] 338 # score, pars, iterations, grads, Hessian = simplex_optimize(options, pars, func, on_iter, grad_func) 339 # options['max iterations'] = options['dfp iterations'] 340 # options['max restarts'] = options['dfp restarts'] 341 # return dfp_optimize(options, pars, func, on_iter, grad_func) 342 # 343 #add_opt('Simplex-DFP', simplex_dfp_optimize, 344 # {'simplex f calls' : 400, 345 # 'simplex iterations' : 100, 346 # 'xtol' : 1e-6, 347 # 'ftol' : 1e-6, 348 # 'num grad dx' : 1e-4, 349 # 'dfp iterations' : 100, 350 # 'dfp restarts' : 0, 351 # 'conv LL' : 1e-6, 352 # 'conv grad' : 1e-6, 353 # 'max step' : 1e-1}, 354 # priority=0) 355