
Source Code for Module mvpa.measures.splitmeasure

# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the PyMVPA package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""This is a `FeaturewiseDatasetMeasure` that uses another
`FeaturewiseDatasetMeasure` and runs it multiple times on different splits of
a `Dataset`.
"""

__docformat__ = 'restructuredtext'

import numpy as N
from mvpa.measures.base import FeaturewiseDatasetMeasure
from mvpa.datasets.splitters import NoneSplitter
from mvpa.misc.state import StateVariable
from mvpa.misc.transformers import FirstAxisMean

if __debug__:
    from mvpa.base import debug
class SplitFeaturewiseMeasure(FeaturewiseDatasetMeasure):
    """This is a `FeaturewiseDatasetMeasure` that uses another
    `FeaturewiseDatasetMeasure` and runs it multiple times on different
    splits of a `Dataset`.

    When called with a `Dataset` it returns the mean sensitivity map across
    all data splits.

    Additionally this class supports the `State` interface. Several
    postprocessing functions can be specified to the constructor. The results
    of the functions specified in the `postproc` dictionary will be available
    via their respective keywords.
    """

    maps = StateVariable(enabled=False,
                         doc="To store maps per each split")

    def __init__(self, sensana,
                 splitter=NoneSplitter,
                 combiner=FirstAxisMean,
                 **kwargs):
        """Cheap initialization.

        :Parameters:
          sensana : FeaturewiseDatasetMeasure
            that shall be run on the `Dataset` splits.
          splitter : Splitter
            used to split the `Dataset`. By convention the first dataset
            in the tuple returned by the splitter on each iteration is used
            to compute the sensitivity map.
          combiner
            This functor will be called on an array of sensitivity maps
            and the result will be returned by __call__(). The result of
            a combiner must be a 1d ndarray.
        """
        # init base classes first
        FeaturewiseDatasetMeasure.__init__(self, **kwargs)

        self.__sensana = sensana
        """Sensitivity analyzer used to compute the sensitivity maps."""
        self.__splitter = splitter
        """Splitter instance used to split the datasets."""
        self.__combiner = combiner
        """Function to combine sensitivities into the result of __call__()."""

    def _call(self, dataset):
        """Compute sensitivity maps for all dataset splits and run the
        postprocessing functions afterwards (if any).

        Returns the sensitivity maps of all splits combined by the `combiner`
        functor; the per-split maps are available via the `maps` state
        variable. Postprocessing results are available via the object's
        `State` interface.
        """

        maps = []

        # splitter
        for split in self.__splitter(dataset):
            # compute sensitivity using the first dataset in the split
            sensitivity = self.__sensana(split[0])

            maps.append(sensitivity)

        self.maps = maps
        """Store the maps across splits"""

        # combine and return all maps
        return self.__combiner(maps)


class TScoredFeaturewiseMeasure(SplitFeaturewiseMeasure):
    """`SplitFeaturewiseMeasure` computing the featurewise t-score of
    sensitivities across splits.
    """
    def __init__(self, sensana, splitter, noise_level=0.0, **kwargs):
        """Cheap initialization.

        :Parameters:
          sensana : SensitivityAnalyzer
            that shall be run on the `Dataset` splits.
          splitter : Splitter
            used to split the `Dataset`. By convention the first dataset
            in the tuple returned by the splitter on each iteration is used
            to compute the sensitivity map.
          noise_level : float
            Theoretical output of the respective `SensitivityAnalyzer`
            for a pure noise pattern. For most algorithms this is probably
            zero, hence the default.
        """
        # init base classes first
        # - get full sensitivity maps from SplitFeaturewiseMeasure
        # - no postprocessing
        # - leave States handling to base class
        SplitFeaturewiseMeasure.__init__(self,
                                         sensana,
                                         splitter,
                                         combiner=N.array,
                                         **kwargs)

        self.__noise_level = noise_level
        """Output of the sensitivity analyzer when there is no signal."""

    def _call(self, dataset, callables=[]):
        """Compute sensitivity maps for all dataset splits and return the
        featurewise t-score of them.
        """
        # let the base class compute the per-split sensitivity maps
        maps = SplitFeaturewiseMeasure._call(self, dataset)

        # featurewise mean
        m = N.mean(maps, axis=0)
        #m = N.min(maps, axis=0)
        # featurewise variance
        v = N.var(maps, axis=0)
        # degrees of freedom (n-1 for a one-sample t-test; computed but not
        # used in the formula below)
        df = maps.shape[0] - 1

        # compute t-score
        t = (m - self.__noise_level) / N.sqrt(v * (1.0 / maps.shape[0]))

        if __debug__:
            debug('SA', 'T-score sensitivities computed for %d maps ' %
                  maps.shape[0] +
                  'min=%f max=%f. mean(m)=%f mean(v)=%f Result min=%f max=%f mean(abs)=%f' %
                  (N.min(maps), N.max(maps), N.mean(m), N.mean(v), N.min(t),
                   N.max(t), N.mean(N.abs(t))))

        return t
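
A minimal usage sketch for SplitFeaturewiseMeasure follows. Everything outside this module -- the Dataset constructor, NFoldSplitter, OneWayAnova, and the enable_states keyword -- is an assumption based on the PyMVPA 0.4-era API and is meant as illustration only, not as the canonical way to drive this class.

import numpy as N
from mvpa.datasets import Dataset                       # assumed import path
from mvpa.datasets.splitters import NFoldSplitter       # assumed splitter class
from mvpa.measures.anova import OneWayAnova             # assumed base measure
from mvpa.measures.splitmeasure import SplitFeaturewiseMeasure

# Toy dataset: 20 samples x 5 features, two labels, four chunks (made up).
ds = Dataset(samples=N.random.randn(20, 5),
             labels=N.repeat([0, 1], 10),
             chunks=N.tile(N.arange(4), 5))

# Run the ANOVA measure once per NFold split and average the per-split maps.
smeasure = SplitFeaturewiseMeasure(OneWayAnova(),
                                   splitter=NFoldSplitter(),
                                   enable_states=['maps'])   # assumed kwarg
mean_map = smeasure(ds)    # combined (FirstAxisMean) 1d sensitivity map
per_split = smeasure.maps  # per-split maps via the `maps` state variable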
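
For reference, the t-score computed in TScoredFeaturewiseMeasure._call reduces to the numpy arithmetic below (note that N.var is used with its default ddof=0, and the df value computed in _call does not enter the formula). The per-split maps here are made-up numbers for illustration.

import numpy as N

# Hypothetical sensitivity maps from 4 splits over 3 features (rows = splits).
maps = N.array([[0.8,  0.1, -0.2],
                [1.0,  0.0, -0.1],
                [0.9,  0.2, -0.3],
                [1.1, -0.1, -0.2]])
noise_level = 0.0

m = N.mean(maps, axis=0)     # featurewise mean across splits
v = N.var(maps, axis=0)      # featurewise variance (ddof=0, as in _call)
t = (m - noise_level) / N.sqrt(v * (1.0 / maps.shape[0]))
# Features whose sensitivity is consistently far from noise_level get large |t|.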