1
2 """Globally available pluggable optimizer framework.
3
4 Copyright 2008-2012 Research Foundation State University of New York
5 This file is part of QUB Express.
6
7 QUB Express is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
11
12 QUB Express is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License,
18 named LICENSE.txt, in the QUB Express program directory. If not, see
19 <http://www.gnu.org/licenses/>.
20
21 """
22
23 import numpy
24 import random
25 import scipy
26 import scipy.optimize
27 import traceback
28 import qubx.fast.fit
29 import qubx.settings
30 import qubx.tree
31 import qubx.util_types
32
33
34
35
36
37
38
39
40
41
42
43
44
45 g_optimizers = []
46
47
48
49 g_opt_ix = {}
50 OnOptimizerAdded = qubx.util_types.WeakEvent()
51 OnOptOptions = qubx.util_types.WeakEvent()
52
53 -def add_opt(name, optimize, defaults, priority=5):
65
73
74 -def setup_opt(profile, defaults={}, method=None):
95
97 qubx.settings.InitSettings()
98 pro = qubx.settings.SettingsMgr[profile].active
99 methods = pro['Methods']
100 opts = []
101 for opt in g_optimizers:
102 optname = opt['name']
103 priority = qubx.tree.node_data_or_set_def(methods, optname, opt['priority'])
104 if priority:
105 mpro = pro[optname]
106 sysdef = opt['defaults']
107 for k in sysdef:
108 if not mpro[k].data:
109 if k in defaults:
110 mpro[k].data = defaults[k]
111 else:
112 mpro[k].data = sysdef[k]
113 opts.append( (priority, optname, opt, mpro) )
114 opts.sort()
115 return opts
116
122
130
131 -def get_options(profile="default_optimize", options={}, method=None):
143
144 -def set_options(profile="default_optimize", options={}, method=None):
158
def get_opt(profile="default_optimize", options=None):
    """Build an optimize(pars, func, on_iter, grad_func=None) callable for one profile.

    Looks up the profile's configured optimizer method and its options, and
    returns a closure that runs it, honoring the optional 'max restarts' and
    'max iterations' settings.

    @param profile: name of the settings profile to read the method/options from
    @param options: optional dict of option overrides, forwarded to get_options;
                    defaults to a fresh empty dict (avoids the shared mutable
                    default-argument pitfall)
    @return: optimize(pars, func, on_iter, grad_func=None) ->
             (score, pars, iterations, grads, Hessian)
    """
    if options is None:
        options = {}
    rec = setup_opt(profile)
    optf = rec['optimize']
    opts = get_options(profile, options)
    def optimize(pars, func, on_iter, grad_func=None):
        # Total attempts = 1 + 'max restarts' (when configured and > 1).
        restarts = 1
        if ('max restarts' in opts) and (restarts < opts['max restarts']):
            restarts += opts['max restarts']
        while restarts:
            restarts -= 1
            score, pars, iterations, grads, Hessian = optf(opts, pars, func, on_iter, grad_func)
            # Finishing under the iteration cap indicates convergence,
            # so no restart is needed.
            if ('max iterations' in opts) and (iterations < opts['max iterations']):
                break
        return score, pars, iterations, grads, Hessian
    return optimize
174
def get_opts(profile="default_optimize", options=None, on_start=lambda rec, opts: None, on_end=lambda rec, opts: None):
    """Build an optimize() callable that runs every enabled method in the profile.

    The returned closure iterates the profile's enabled optimizer methods in
    priority order (via setup_opts), running each in turn on the current
    parameters, with the same restart/convergence logic as get_opt.

    @param profile: name of the settings profile to read methods/options from
    @param options: optional dict of option overrides, forwarded to get_options;
                    defaults to a fresh empty dict (avoids the shared mutable
                    default-argument pitfall)
    @param on_start: callback(rec, opts) invoked before each method runs
    @param on_end: callback(rec, opts) invoked after each method finishes
    @return: optimize(pars, func, on_iter, grad_func=None) ->
             (score, pars, iterations, grads, Hessian)
    """
    if options is None:
        options = {}
    def optimize(pars, func, on_iter, grad_func=None):
        # Safe results in case no method is enabled for this profile.
        score, iterations, grads, Hessian = 0.0, 0, numpy.array([0.0]*len(pars)), None
        # Tuples presumably match those built in setup: (priority, name,
        # record, method-settings node) -- the last element is unused here.
        for priority, optname, rec, mpro in setup_opts(profile):
            optf = rec['optimize']
            opts = get_options(profile, options, method=optname)
            restarts = 1
            if ('max restarts' in opts) and (restarts < opts['max restarts']):
                restarts += opts['max restarts']
            on_start(rec, opts)
            while restarts:
                restarts -= 1
                score, pars, iterations, grads, Hessian = optf(opts, pars, func, on_iter, grad_func)
                # Finishing under the iteration cap indicates convergence.
                if ('max iterations' in opts) and (iterations < opts['max iterations']):
                    break
            on_end(rec, opts)
        return score, pars, iterations, grads, Hessian
    return optimize
193
194 -def optimize(pars, func, on_iter, grad_func=None, profile="default_optimize", options={}, multi_method=False):
196
197
199
200 grads = numpy.zeros(shape=pars.shape, dtype=pars.dtype)
201 Hessian = numpy.zeros(shape=(len(pars),len(pars)), dtype=pars.dtype)
202 if grad_func:
203 score = grad_func(pars, grads)
204 else:
205 score = func(pars)
206 iterations = 1
207 on_iter(pars, grads, iterations, score)
208 return score, pars, iterations, grads, Hessian
209
210
211
212
214 def grad_func(pars, grads, dx_specific=None):
215 d = dx_specific or dx
216 score = func(pars)
217 xpars = numpy.array(pars[:nx], dtype='float64', copy=True)
218 for ix in xrange(nx):
219 xpars[:nx] = pars[:nx]
220 xpars[ix] += d
221 xscore = func(xpars)
222 grads[ix] = (xscore - score) / d
223
224 return score
225 return grad_func
226
def dfp_optimize(options, pars, func, on_iter, grad_func=None):
    """Minimize func(pars) with qubx.fast.fit.dfpmin (Davidon-Fletcher-Powell).

    @param options: dict with 'num grad dx', 'max iterations', 'conv LL',
                    'conv grad', 'max step', and 'perturb'
    @param pars: initial parameter array; modified in place when 'perturb'
                 is nonzero (random jitter of the start point)
    @param func: score function of a parameter array
    @param on_iter: callback(pars, grads, iterations, score); raising from it
                    aborts the search (dfp_iter signals -1 to the C code)
    @param grad_func: optional gradient function; defaults to numeric
                      differencing via num_grad.
                      NOTE(review): it is invoked as grad_func(pars, grads, dx)
                      -- an externally supplied gradient must accept the third
                      argument; confirm against callers.
    @return: (score, pars, iterations, grads, Hessian)
    """
    nx = len(pars)
    dx = options['num grad dx']
    gradf = grad_func or num_grad(func, nx, dx)
    # Mutable cells let the callbacks report back across the C boundary.
    out_iterations = [0]
    out_pars = numpy.array(pars)
    perturb = options['perturb']
    if perturb:
        # Jitter each start value by up to +/- perturb/2, relative.
        for i, p in enumerate(pars):
            pars[i] = p*(1.0 + perturb*(random.random() - .5))
    def dfp_func(pars, grads, do_grads):
        # Scoring callback handed to the C optimizer; log then re-raise so
        # Python-side errors are not silently lost inside the C call.
        try:
            if do_grads:
                return gradf(pars[:nx], grads, dx)
            else:
                return func(pars[:nx])
        except:
            traceback.print_exc()
            raise
    def dfp_iter(iterations, nf, ndf, ll, pars, grads):
        # Per-iteration callback; returning nonzero tells dfpmin to stop.
        try:
            out_iterations[0] = iterations
            out_pars[:] = pars[:nx]
            on_iter(pars[:nx], grads[:nx], iterations, ll)
            return 0
        except:
            traceback.print_exc()
            return -1

    minval, errno, grads, Hessian = qubx.fast.fit.dfpmin(pars, dfp_func, dfp_iter, options['max iterations'],
                                                         options['conv LL'], options['conv grad'], options['max step'])
    return minval, out_pars, out_iterations[0], grads, Hessian
260
# Register DFP (Davidon-Fletcher-Powell) as an available optimizer method.
add_opt('DFP', dfp_optimize,
        {'num grad dx' : 1e-4,       # dx step for numeric gradient differencing
         'max iterations' : 100,     # hard cap on optimizer iterations
         'max restarts' : 0,         # extra full runs after a non-converged run
         'conv LL' : 1e-6,           # convergence tolerance on the score
         'conv grad' : 1e-6,         # convergence tolerance on the gradient
         'max step' : 1e-1,          # largest allowed parameter step
         'perturb' : 1e-2},          # relative random jitter of start values
        priority=8)
270
271
285 def bfgs_grad_func(pars):
286 score = gradf(pars, grads, dx)
287 return grads
288 def bfgs_iter(pars):
289 iters[0] += 1
290 on_iter(pars, grads, iters[0], score_of_pars[tuple(pars[:len(grads)])])
291
292 result = scipy.optimize.fmin_bfgs(bfgs_func, pars, bfgs_grad_func, gtol=options['conv grad'],
293 maxiter=options['max iterations'],
294 full_output=1, disp=1, retall=0, callback=bfgs_iter)
295 pars, score, grads, Hessian, nf, ng, warnflag = result
296 return score, pars, iters[0], grads, Hessian
297
298
# Register scipy's BFGS as an available optimizer method.
# priority=0: a falsy priority causes setup to skip the method by default
# (see the "if priority:" test in the setup loop above).
add_opt('BFGS', bfgs_optimize,
        {'num grad dx' : 1e-4,       # dx step for numeric gradient differencing
         'max iterations' : 100,     # maxiter for scipy.optimize.fmin_bfgs
         'conv LL' : 1e-6,           # score tolerance; not passed in the visible fmin_bfgs call
         'conv grad' : 1e-6},        # gtol for scipy.optimize.fmin_bfgs
        priority=0)
305
306
317 def simplex_iter(pars):
318 iters[0] += 1
319 on_iter(pars, grads, iters[0], score_of_pars[tuple(pars[:len(grads)])])
320
321 pars, score, iterations, nf, warnflag = \
322 scipy.optimize.fmin(simplex_func, pars, maxfun=options['max f calls'],
323 xtol=options['xtol'], ftol=options['ftol'], maxiter=options['max iterations'],
324 full_output=1, disp=1, retall=0, callback=simplex_iter)
325 return score, pars, iterations, grads, Hessian
326
# Register scipy's Nelder-Mead simplex as an available optimizer method.
add_opt('Simplex', simplex_optimize,
        {'max f calls' : 400,        # maxfun for scipy.optimize.fmin
         'max iterations' : 100,     # maxiter for scipy.optimize.fmin
         'xtol' : 1e-6,              # parameter convergence tolerance
         'ftol' : 1e-6},             # score convergence tolerance
        priority=2)
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355