1 """Provides a visual panel (L{qubx.faces.Face}) to idealize single-molecule data.
2
3 Uses math from U{qubx_single_channel<http://www.qub.buffalo.edu/src/qub-express/api/qubx_single_channel>}
4
5 /* Copyright 2008-2014 Research Foundation State University of New York */
6
7 /* This file is part of QUB Express. */
8
9 /* QUB Express is free software; you can redistribute it and/or modify */
10 /* it under the terms of the GNU General Public License as published by */
11 /* the Free Software Foundation, either version 3 of the License, or */
12 /* (at your option) any later version. */
13
14 /* QUB Express is distributed in the hope that it will be useful, */
15 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17 /* GNU General Public License for more details. */
18
19 /* You should have received a copy of the GNU General Public License, */
20 /* named LICENSE.txt, in the QUB Express program directory. If not, see */
21 /* <http://www.gnu.org/licenses/>. */
22
23 """
24
import itertools
import gc
import os
import sys
import time
import traceback
import gobject

from qubx.data_types import *
from qubx.util_types import *
from qubx.faces import *
from qubx.GTK import *
import qubx.fast.model
import qubx.idealize
import qubx.notebook
import qubx.optimize
import qubx.optimizeGTK
import qubx.pyenv
import qubx.settings
import qubx.tree
from qubx.model import *
from qubx.settings import Property, Propertied
from qubx.task import *
import qubx_idealize
from qubx_idealize import QUBX_IDLZ_METHODS

import scipy.optimize
import numpy  # numpy is referenced by name below (numpy.array, numpy.zeros)
from numpy import *

import ctypes
from ctypes import c_int, c_float, c_double, c_void_p, c_char_p, Structure, POINTER, CFUNCTYPE, byref, sizeof, py_object, pointer, cast
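# PyBuffer_FromReadWriteMemory (Python 2 C API) wraps raw memory as a writable Python
# buffer object; restype must be set to py_object so ctypes returns that object directly.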
pybuf = ctypes.pythonapi.PyBuffer_FromReadWriteMemory
pybuf.restype = py_object


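# MAX_POINTS_AT_ONCE bounds how many samples are idealized in one pass; break_segs()
# below splits the selection accordingly.  MAXITER presumably caps the number of
# re-estimation iterations.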
MAXITER = 300
MAX_POINTS_AT_ONCE = (1 << 24)

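# get_data() gathers the selected segments' samples and any existing stimulus
# idealization into a data object for the idealization routines, then delivers it to
# `receiver` on the GTK main loop via gobject.idle_add.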
def get_data(segs, receiver, last_data=None):
    def receive_stims():
        iseg = -1
        for seg in segs:
            data.file = seg.file
            data.iSignal = seg.signal
            stimulus.call_with_stimulus_idl(rcv_hack, [(chunk.f, chunk.l) for chunk in seg.chunks])
            stim_names, stimidl = (yield)
            chunkidl = [ Anon(dwellCounts=[], classeses=[], durationses=[], ampses=[]) for chunk in seg.chunks]
            for segs_flcda in stimidl:
                for ichunk, flcda in enumerate(segs_flcda):
                    ff, ll, cc, dd, aa = flcda
                    chunkidl[ichunk].dwellCounts.append(len(cc))
                    chunkidl[ichunk].classeses.append(cc)
                    chunkidl[ichunk].durationses.append(dd)
                    chunkidl[ichunk].ampses.append(aa)

            iseg += 1
            data.append_segment(seg.f)
            for ichunk, chunk in enumerate(seg.chunks):
                Nd = chunk.n
                yy = None
                if chunk.included:
                    chunkoff = chunk.f - seg.f
                    yy = zeros(shape=(Nd,), dtype='float32')
                    ii = 0
                    for ch in generate_chunk_samples([chunk]):
                        yy[ii:ii+ch.n] = ch.samples
                        ii += ch.n
                    ff = arange(seg.f+chunkoff, seg.f+chunkoff+Nd, dtype='int32')
                    ll = ff + 1
                    data.ff_ll_counts.append( (ff, ll, Nd) )
                data.append_chunk(iseg, Nd, yy,
                                  chunkidl[ichunk].dwellCounts, chunkidl[ichunk].classeses,
                                  chunkidl[ichunk].durationses, chunkidl[ichunk].ampses)
        gobject.idle_add(receiver, data)
        yield None

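    # Create the coroutine and advance it to its first (yield); the stimulus
    # idealization is presumably sent back into it by the rcv_hack callback above.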
    rcv_stims = receive_stims()
    rcv_stims.next()


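# Each Property below becomes an attribute of the decorated face (e.g. self.usePeq),
# persisted by qubx.settings under the key passed to propertied_connect_settings() and
# bound to a GTK control via the propertied_connect_* helpers in __init__.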
@Propertied(Property("usePeq", True, "True to start each segment at equilibrium; False to use States:Pr"),
            Property("Gleak", 0.0, "Leakage conductance (pS)"),
            Property("Vleak", 0.0, "Leak voltage (mV)"),
            Property("resolution", 0.05, "Gaussian sampling (pA)"),
            Property("reest_amps", True, "True to recompute amplitudes each iteration"),
            Property("reest_rates", False, "True to recompute transition probabilities each iteration"),
            Property("baseline", False, "True to track baseline with Kalman filter"),
            Property("baseline_std", 1e-3, "Kalman process standard deviation"),
            Property("baseline_nodes", True, "True to add baseline nodes, if baseline"),
            Property("baseline_nodes_delta", 0.05, "Minimum y-distance between nodes")
            )
    __explore_featured = ['global_name', 'robot', 'shared_controls', 'method', 'qubx_finalize', 'idealize']
        gtk.VBox.__init__(self)
        self.global_name = 'QubX.Modeling.Idealize.methods["Threshold"]'
        self.__ref = Reffer()
        self.propertied_connect_settings('qubx_single.idealize')
        self.__running = False
        self.set_size_request(200, 50)
        self.QubX = qubx.pyenv.env.globals['QubX']
        self.QubX.OnQuit += self.__ref(self.qubx_finalize)
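        # Long-running work is handed to a background Robot task; results come back to
        # the GTK main loop via gobject.idle_add (see idealize() below).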
        self.robot = Robot('Idealize', self.__ref(lambda: Tasks.add_task(self.robot)),
                           self.__ref(lambda: Tasks.remove_task(self.robot)))
        self.robot.OnException += self.__ref(self.__onException)
        self.robot.OnInterrupt += self.__ref(self.__onInterrupt)

        llstuf = pack_item(gtk.HBox(), self)
        col = pack_item(gtk.VBox(), llstuf)
        self.chkPeq = pack_check('start at equilibrium p', col)
        self.chkPeq.set_tooltip_text('otherwise use States:Pr')
        self.propertied_connect_check('usePeq', self.chkPeq)

        pack_space(llstuf, x=20)
        col = pack_item(gtk.VBox(), llstuf)
        h = pack_item(gtk.HBox(), col)
        pack_label('Gleak:', h)
        self.txtGleak = pack_item(NumEntry(self.Gleak, acceptFloat, '%.3g', width_chars=6), h, at_end=True)
        self.propertied_connect_NumEntry('Gleak', self.txtGleak)

        pack_space(llstuf, expand=True)
        col = pack_item(gtk.VBox(), llstuf)
        h = pack_item(gtk.HBox(), col)
        pack_label('Vleak:', h)
        self.txtVleak = pack_item(NumEntry(self.Vleak, acceptFloat, '%.3g', width_chars=6), h, at_end=True)
        self.propertied_connect_NumEntry("Vleak", self.txtVleak)

        optstuf = pack_item(gtk.HBox(), self, expand=True)
        self.txtStatus = gtk.TextView()
        self.outStatus = TextViewAppender(self.txtStatus)
        self.txtStatus.set_editable(False)
        SetFixedWidth(self.txtStatus)
        pack_scrolled(self.txtStatus, optstuf, expand=True)
        self.outStatus.write("""Based on "Restoration of single-channel currents using the segmental k-means method based on hidden Markov modeling," Qin F, Biophys J. 2004 Mar; 86(3):1488-501.

Before idealizing, make sure to "Grab" the amplitude distributions
(right-click a state in the model).
""")
        col = pack_item(gtk.VBox(), optstuf)
        self.chkReestAmps = pack_check("re-est amps", col)
        self.propertied_connect_check("reest_amps", self.chkReestAmps)
        self.chkReestRates = pack_check("re-est rates", col)
        self.propertied_connect_check("reest_rates", self.chkReestRates)
        row = pack_item(gtk.HBox(), col)
        self.chkBaseline = pack_check("baseline std:", row)
        self.propertied_connect_check("baseline", self.chkBaseline)
        self.txtBaseline = pack_item(NumEntry(self.baseline_std, acceptFloatGreaterThan(0.0), '%.3g', width_chars=6), row, expand=True)
        self.propertied_connect_NumEntry("baseline_std", self.txtBaseline)
        row = pack_item(gtk.HBox(), col)
        self.chkBaselineNodes = pack_check("add nodes, delta:", row)
        self.propertied_connect_check("baseline_nodes", self.chkBaselineNodes)
        self.chkBaselineNodes.set_tooltip_text('To see and edit baseline nodes, choose the Baseline tool in the Data panel')
        self.txtBaselineDelta = pack_item(NumEntry(self.baseline_nodes_delta, acceptFloatGreaterThan(0.0), '%.3g', width_chars=6), row, expand=True)
        self.propertied_connect_NumEntry('baseline_nodes_delta', self.txtBaselineDelta)
        self.txtBaselineDelta.set_tooltip_text('Minimum vertical distance between nodes')
        self.shared_controls = pack_item(gtk.HBox(), col, at_end=True)

        self.__method = "Threshold"

    method = property(lambda self: self.__method, lambda self, x: self.set_method(x))
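    # Controls that only apply to the SKM-based methods are greyed out while the
    # Threshold (half-amplitude) method is selected.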
        thresh = self.method == 'Threshold'
        self.chkReestAmps.set_sensitive(not thresh)
        self.chkReestRates.set_sensitive(not thresh)
        self.chkBaseline.set_sensitive(not thresh)
        self.txtBaseline.set_sensitive((not thresh) and self.baseline)
        self.chkBaselineNodes.set_sensitive((not thresh) and self.baseline)
        self.txtBaselineDelta.set_sensitive((not thresh) and self.baseline and self.baseline_nodes)

        traceback.print_exception(typ, val, trace, file=self.outStatus)
        for ff, ll, cc, aa, ss in dwells_per_seg:
            self.idl_updater.set_dwells(len(ff), ff, ll, cc, aa, ss, event=True)
        if nodes_per_seg:
            for ff, ll, cc, aa, ss in dwells_per_seg:
                if len(ff):
                    self.datafile.baseline[self.iSignal].clear_nodes(ff[0], ll[-1])
            for nodes in nodes_per_seg:
                self.__set_nodes(*nodes)
        self.outStatus.write('Done.\n')
        if ll:
            self.outStatus.write("LL: %f\n" % ll)
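    # robot_setup() converts the current model into the qubx.fast structures used by the
    # qubx_idealize routines: a MultiModel, per-stimulus amplitudes (StimAmps) and, for
    # the SKM-based methods, per-stimulus rates (StimRates) with the model's kinetic
    # constraints.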
    def robot_setup(self, mtree, Peq, Gleak, Vleak, segs):
        model = QubModel()
        if self.robot.mtree:
            model.from_tree(self.robot.mtree)
        try:
            self.robot.single_model.dispose()
        except:
            pass
        self.robot.single_model = ModelToFast(model, data.signal_names, self.robot_on_status)
        self.robot.single_model.usePeq = self.robot.Peq
        self.robot.single_model.iVoltage = data.iVoltage
        self.robot.single_model.Gleak = self.robot.Gleak
        self.robot.single_model.Vleak = self.robot.Vleak
        if model.channelCount > 16:
            self.robot_on_status("Warning: Channel count > 16 not supported; setting it to 16.")
            self.robot.single_model.Nchannel = 16

        self.robot.multi_model = qubx.fast.model.MultiModel(self.robot.single_model)
        self.robot.model = self.robot.multi_model.multi_model
        self.robot.K0, self.robot.K1, self.robot.L, self.robot.V = ModelToRateMatrices(model, data.signal_names)
        self.robot.multi_model.update()

        stimclasses = self.robot.data.get_stimclasses()
        Nstim = self.robot.data.Nstim
        Nsig = self.robot.data.Nsig
        sc_frac = self.robot.data.get_stimclass_frac()
        self.robot.ampm = qubx.fast.model.StimAmps(self.robot.model, Nstim, Nsig, stimclasses, sc_frac)
        if self.robot.method != 'Threshold':
            self.robot.ratesm = qubx.fast.model.StimRates(self.robot.model, Nstim, Nsig, stimclasses, sc_frac,
                                                          self.robot.data.sampling)
            A, B = model.constraints_kin.get_matrices(self.robot.K0, self.robot.K1, self.robot.L, self.robot.V, auto=False)
            # A is None when there are no kinetic constraints; compare with `is not None`
            # since A is a numpy array and elementwise comparison is not a truth value.
            if A is not None:
                self.robot.ratesm.cns.Ain[:A.shape[0], :A.shape[1]] = A
                self.robot.ratesm.cns.Bin[:B.shape[0]] = B.flatten()
                self.robot.ratesm.cns.Ncns = B.shape[0]
            else:
                self.robot.ratesm.cns.Ncns = 0
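    # idealize() appears to run on the robot's background thread: it snapshots GUI
    # settings under main_hold, splits the selection into blocks of at most
    # MAX_POINTS_AT_ONCE samples, idealizes each block (half-amplitude Threshold or SKM),
    # and posts dwells, baseline nodes and status back to the GUI with gobject.idle_add.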
    def idealize(self, segments, model, on_finish):
        dwells = None
        with self.robot.main_hold:
            self.datafile = segments[0].file
            self.__model = model
            self.iSignal = segments[0].signal
            mtree = model.as_tree()
            Peq = self.usePeq
            Gleak = self.Gleak
            Vleak = self.Vleak
            amps = mtree['Amps'].data
            self.robot.method = self.method
            self.robot.resolution = 10**int(round(log10((max(amps) - min(amps)) * 1e-2)))
            self.robot.reest_amps = self.reest_amps
            self.robot.reest_rates = self.reest_rates
            self.robot.track_baseline = self.baseline
            self.robot.baseline_std = self.baseline_std
            self.robot.baseline_nodes = self.baseline_nodes
            self.robot.baseline_nodes_delta = self.baseline_nodes_delta
            self.robot.segs = self.QubX.DataSource.get_segmentation(baseline_nodes=(not self.robot.track_baseline))
        segs_lst = break_segs(segments, MAX_POINTS_AT_ONCE)
        try:
            self.robot.progress_before = 0
            self.robot.progress_segs = segs_lst and (100.0 / len(segs_lst)) or 100.0
            ll = 0.0
            for segs in segs_lst:
                self.robot_setup(mtree, Peq, Gleak, Vleak, segs)
                if self.robot.method == 'Threshold':
                    ll += qubx_idealize.halfamp(self.robot.model, self.robot.data, self.robot.ampm)
                else:
                    ll += qubx_idealize.skm(self.robot.model, self.robot.data, self.robot.ampm, self.robot.ratesm,
                                            self.robot.resolution, (self.robot.method == 'SKM') and 2 or 1,
                                            self.robot.reest_amps, self.robot.reest_rates,
                                            self.robot.track_baseline, self.robot.baseline_std)
                dwells = [self.robot_renumber_dwell_classes(*self.robot.data.get_dwells(s)) for s in xrange(len(segs))]
                if self.robot.track_baseline and self.robot.baseline_nodes:
                    nodes = [resample_baseline(self.robot.data.get_baseline(s), self.robot.baseline_nodes_delta, seg.f) for s, seg in enumerate(segs)]
                else:
                    nodes = None
                gobject.idle_add(self.__onSetDwells, dwells, nodes)
                self.robot.progress_before += self.robot.progress_segs
        except:
            self.robot_on_status(traceback.format_exc())
            ll = 0.0
        self.__running = False
        gobject.idle_add(self.__after_set_dwells, ll)
        gobject.idle_add(on_finish)

        qubx.notebook.Notebook.send(qubx.notebook.NbText("""Idealize: %s
model: %s
method: %s
re-estimate: %s
track baseline: %s
resolution: %s
LL: %f""" % (self.datafile.path,
             self.__model.path,
             self.robot.method,
             "%s%s" % ((self.robot.reest_amps and "amps" or ""),
                       (self.robot.reest_rates and "rates" or "")),
             (self.robot.track_baseline and ("""yes
baseline std: %.4g""" % self.robot.baseline_std) or "no"),
             self.robot.resolution, ll)), auto_id='Idealize.Messages')
        if self.idl_updater:
            self.idl_updater.done()
        self.datafile = self.__model = self.__segs = self.__out_list = self.idl_updater = None
        gobject.idle_add(self.outStatus.write, msg+"\n")
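    # With a single channel, dwell classes come back in the fast model's internal order;
    # remap them, and the per-class amp/std arrays, to the model's class numbering via
    # multi_model.class_order.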
        if (self.robot.single_model.Nchannel == 1) and len(ff) and len(aa):
            class_order = self.robot.multi_model.class_order[:self.robot.multi_model.nclass_order]
            nc = max(class_order) + 1
            cc = numpy.array([class_order[c] for c in cc], dtype='int32')
            aa2 = numpy.zeros(shape=(nc,), dtype='float64')
            ss2 = numpy.zeros(shape=(nc,), dtype='float64')
            for ic, c in enumerate(class_order):
                if ic < len(aa):
                    aa2[c] = aa[ic]
                    ss2[c] = ss[ic]
            aa, ss = aa2, ss2
        return ff, ll, cc, aa, ss

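# break_segs() partitions a list of segments (or chunks) into sub-lists totalling at most
# `maxpoints` samples each, splitting any oversized segment into pieces, so each
# idealization pass stays within MAX_POINTS_AT_ONCE.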
    segs_lst = []
    segs_one = []
    sofar = 0
    for seg in segs:
        n = ((seg.chunks and sum(chunk.included and chunk.n or 0 for chunk in seg.chunks))
             or (seg.included and seg.n or 0))
        if (sofar + n) > maxpoints:
            if segs_one:
                segs_lst.append(segs_one)
                segs_one = []
                sofar = 0
        if n > maxpoints:
            if seg.chunks:
                chunks = break_segs(seg.chunks, maxpoints)
                for chunklst in chunks:
                    segs_lst.append([Anon(seg, f=chunklst[0].f, l=chunklst[-1].l, n=(chunklst[-1].l - chunklst[0].f + 1),
                                          chunks=chunklst)])
            else:
                K = int(ceil(n*1.0/maxpoints))
                sub_n = n / K
                for k in xrange(K):
                    segs_lst.append([Anon(seg, f=seg.f+k*sub_n, l=min(seg.f+(k+1)*sub_n - 1, seg.l),
                                          n=min(sub_n, seg.l - (seg.f+k*sub_n) + 1))])
        else:
            segs_one.append(seg)
            sofar += n
    if segs_one:
        segs_lst.append(segs_one)
    return segs_lst