1 | /*
|
---|
2 | * Class Regression
|
---|
3 | *
|
---|
4 | * Contains methods for simple linear regression
|
---|
5 | * (straight line), for multiple linear regression,
|
---|
6 | * for fitting data to a polynomial and for non-linear
|
---|
7 | * regression (Nelder and Mead Simplex method) for both user
|
---|
8 | * supplied functions and for a wide range of standard functions
|
---|
9 | *
|
---|
10 | * The sum of squares function needed by the non-linear regression methods
|
---|
11 | * non-linear regression methods is supplied by means of the interfaces,
|
---|
12 | * RegressionFunction or RegressionFunction2
|
---|
13 | *
|
---|
14 | * WRITTEN BY: Dr Michael Thomas Flanagan
|
---|
15 | *
|
---|
16 | * DATE: February 2002
|
---|
17 | * MODIFIED: 7 January 2006, 28 July 2006, 9 August 2006, 4 November 2006, 21 November 2006, 21 December 2006,
|
---|
18 | * 14 April 2007, 9 June 2007, 25 July 2007, 23/24 August 2007, 14 September 2007, 28 December 2007,
|
---|
19 | * 18-26 March 2008, 7 April 2008, 27 April 2008, 10/12/19 May 2008, 5-6 July 2008, 28 July 2008,
|
---|
20 | * 29 August 2008, 5 September 2008, 6 October 2008, 13-15 October 2009, 13 November 2009, 10 December 2009,
|
---|
21 | * 20 December 2009, 12 January 2010, 18-25 May 2010, 9 July 2010, 10-16 August 2010, 21-29 October 2010
|
---|
22 | * 2-7 November 2010, 2 January 2011, 20-31 January 2011, 2-4 February 2011
|
---|
23 | *
|
---|
24 | * DOCUMENTATION:
|
---|
25 | * See Michael Thomas Flanagan's Java library on-line web page:
|
---|
26 | * http://www.ee.ucl.ac.uk/~mflanaga/java/Regression.html
|
---|
27 | * http://www.ee.ucl.ac.uk/~mflanaga/java/
|
---|
28 | *
|
---|
29 | * Copyright (c) 2002 - 2011 Michael Thomas Flanagan
|
---|
30 | *
|
---|
31 | * PERMISSION TO COPY:
|
---|
32 | *
|
---|
33 | * Permission to use, copy and modify this software and its documentation for NON-COMMERCIAL purposes is granted, without fee,
|
---|
34 | * provided that an acknowledgement to the author, Dr Michael Thomas Flanagan at www.ee.ucl.ac.uk/~mflanaga, appears in all copies
|
---|
35 | * and associated documentation or publications.
|
---|
36 | *
|
---|
37 | * Redistributions of this source code, or parts of this source code, must retain the above copyright notice, this list of conditions
38 | * and the following disclaimer, and require written permission from Michael Thomas Flanagan:
|
---|
39 | *
|
---|
40 | * Redistribution in binary form of all or parts of this class must reproduce the above copyright notice, this list of conditions and
|
---|
41 | * the following disclaimer in the documentation and/or other materials provided with the distribution and requires written permission from the Michael Thomas Flanagan:
|
---|
42 | *
|
---|
43 | * Dr Michael Thomas Flanagan makes no representations about the suitability or fitness of the software for any or for a particular purpose.
|
---|
44 | * Dr Michael Thomas Flanagan shall not be liable for any damages suffered as a result of using, modifying or distributing this software
|
---|
45 | * or its derivatives.
|
---|
46 | *
|
---|
47 | ***************************************************************************************/
|
---|
48 |
|
---|
49 |
|
---|
50 | package agents.anac.y2015.agentBuyogV2.flanagan.analysis;
|
---|
51 |
|
---|
52 | import java.util.*;
|
---|
53 | import javax.swing.JOptionPane;
|
---|
54 |
|
---|
55 | import agents.anac.y2015.agentBuyogV2.flanagan.analysis.*;
|
---|
56 | import agents.anac.y2015.agentBuyogV2.flanagan.circuits.Impedance;
|
---|
57 | import agents.anac.y2015.agentBuyogV2.flanagan.interpolation.CubicSpline;
|
---|
58 | import agents.anac.y2015.agentBuyogV2.flanagan.io.*;
|
---|
59 | import agents.anac.y2015.agentBuyogV2.flanagan.math.*;
|
---|
60 | import agents.anac.y2015.agentBuyogV2.flanagan.plot.Plot;
|
---|
61 | import agents.anac.y2015.agentBuyogV2.flanagan.plot.PlotGraph;
|
---|
62 |
|
---|
63 |
|
---|
64 | // Regression class
|
---|
65 | public class Regression{
|
---|
66 |
|
---|
67 | protected int nData0=0; // number of y data points inputted (in a single array if multiple y arrays)
|
---|
68 | protected int nData=0; // number of y data points (nData0 times the number of y arrays)
|
---|
69 | protected int nXarrays=1; // number of x arrays
|
---|
70 | protected int nYarrays=1; // number of y arrays
|
---|
71 | protected int nTerms=0; // number of unknown parameters to be estimated
|
---|
72 | // multiple linear (a + b.x1 +c.x2 + . . ., = nXarrays + 1
|
---|
73 | // polynomial fitting; = polynomial degree + 1
|
---|
74 | // generalised linear; = nXarrays
|
---|
75 | // simplex = no. of parameters to be estimated
|
---|
76 | protected int degreesOfFreedom=0; // degrees of freedom = nData - nTerms
|
---|
77 | protected double[][] xData=null; // x data values
|
---|
78 | protected double[] yData=null; // y data values
|
---|
79 | protected double[] yCalc=null; // calculated y values using the regrssion coefficients
|
---|
80 | protected double[] weight=null; // weighting factors
|
---|
81 | protected double[] residual=null; // residuals
|
---|
82 | protected double[] residualW=null; // weighted residuals
|
---|
83 | protected boolean weightOpt=false; // weighting factor option
|
---|
84 | // = true; weights supplied
|
---|
85 | // = false; weigths set to unity in regression
|
---|
86 | // average error used in statistacal methods
|
---|
87 | // if any weight[i] = zero,
|
---|
88 | // weighOpt is set to false and
|
---|
89 | // all weights set to unity
|
---|
90 | protected int weightFlag=0; // weighting flag - weightOpt = false, weightFlag = 0; weightOpt = true, weightFlag = 1
|
---|
91 | protected String[] weightWord = {"", "Weighted "};
|
---|
92 |
|
---|
93 | protected double[] best = null; // best estimates vector of the unknown parameters
|
---|
94 | protected double[] bestSd =null; // standard deviation estimates of the best estimates of the unknown parameters
|
---|
95 | protected double[] pseudoSd = null; // Pseudo-nonlinear sd
|
---|
96 | protected double[] tValues = null; // t-values of the best estimates
|
---|
97 | protected double[] pValues = null; // p-values of the best estimates
|
---|
98 | protected double fixedInterceptL = 0.0; // Fixed intercept (linear regression)
|
---|
99 | protected double fixedInterceptP = 0.0; // Fixed intercept (polynomial fitting)
|
---|
100 |
|
---|
101 | protected double yMean=Double.NaN; // mean of y data
|
---|
102 | protected double yWeightedMean=Double.NaN; // weighted mean of y data
|
---|
103 | protected double chiSquare=Double.NaN; // chi square (observed-calculated)^2/variance; weighted error sum of squares
|
---|
104 | protected double reducedChiSquare=Double.NaN; // reduced chi square
|
---|
105 | protected double sumOfSquaresError=Double.NaN; // Sum of the squares of the residuals; unweighted error sum of squares
|
---|
106 | protected double sumOfSquaresTotal=Double.NaN; // Total sum of the squares
|
---|
107 | protected double sumOfSquaresRegrn=Double.NaN; // Regression sum of the squares
|
---|
108 |
|
---|
109 | protected double lastSSnoConstraint=0.0D; // Last sum of the squares of the residuals with no constraint penalty
|
---|
110 | protected double[][] covar=null; // Covariance matrix
|
---|
111 | protected double[][] corrCoeff=null; // Correlation coefficient matrix
|
---|
112 | protected double xyR = Double.NaN; // correlation coefficient between x and y data (y = a + b.x only)
|
---|
113 | protected double yyR = Double.NaN; // correlation coefficient between y calculted and y data (all regressions)
|
---|
114 | protected double multR = Double.NaN; // coefficient of determination
|
---|
115 | protected double adjustedR = Double.NaN; // adjusted coefficient of determination
|
---|
116 | protected double multipleF = Double.NaN; // coefficient of determination: F-ratio
|
---|
117 | protected double multipleFprob = Double.NaN; // coefficient of determination: F-ratio probability
|
---|
118 |
|
---|
119 | protected String[] paraName = null; // names of parameters, eg, mean, sd; c[0], c[1], c[2] . . .
|
---|
120 | protected int prec = 4; // number of places to which double variables are truncated on output to text files
|
---|
121 | protected int field = 13; // field width on output to text files
|
---|
122 |
|
---|
123 | protected int lastMethod=-1; // code indicating the last regression procedure attempted
|
---|
124 | // = 0 multiple linear regression, y = a + b.x1 +c.x2 . . .
|
---|
125 | // = 1 polynomial fitting, y = a +b.x +c.x^2 . . .
|
---|
126 | // = 2 generalised multiple linear y = a.f1(x) + b.f2(x) . . .
|
---|
127 | // = 3 Nelder and Mead simplex
|
---|
128 | // = 4 Fit to a Gaussian distribution (see also 38 below)
|
---|
129 | // = 5 Fit to a Lorentzian distribution
|
---|
130 | // = 6 Fit to a Poisson distribution
|
---|
131 | // = 7 Fit to a Two Parameter Gumbel distribution (minimum order statistic)
|
---|
132 | // = 8 Fit to a Two Parameter Gumbel distribution (maximum order statistic)
|
---|
133 | // = 9 Fit to a One Parameter Gumbel distribution (minimum order statistic)
|
---|
134 | // = 10 Fit to One Parameter Gumbel distribution (maximum order statistic)
|
---|
135 | // = 11 Fit to a Standard Gumbel distribution (minimum order statistic)
|
---|
136 | // = 12 Fit to a Standard Gumbel distribution (maximum order statistic)
|
---|
137 | // = 13 Fit to a Three parameter Frechet distribution
|
---|
138 | // = 14 Fit to a Two Parameter Frechet distribution
|
---|
139 | // = 15 Fit to a Standard Frechet distribution
|
---|
140 | // = 16 Fit to a Three parameter Weibull distribution
|
---|
141 | // = 17 Fit to a Two Parameter Weibull distribution
|
---|
142 | // = 18 Fit to a Standard Weibull distribution
|
---|
143 | // = 19 Fit to a Two Parameter Exponential distribution
|
---|
144 | // = 20 Fit to a One Parameter Exponential distribution
|
---|
145 | // = 21 Fit to a Standard Parameter Exponential distribution
|
---|
146 | // = 22 Fit to a Rayleigh distribution
|
---|
147 | // = 23 Fit to a Two Parameter Pareto distribution
|
---|
148 | // = 24 Fit to a One Parameter Pareto distribution
|
---|
149 | // = 25 Fit to a Sigmoidal Threshold Function
|
---|
150 | // = 26 Fit to a rectangular Hyperbola
|
---|
151 | // = 27 Fit to a scaled Heaviside Step Function
|
---|
152 | // = 28 Fit to a Hills/Sips Sigmoid
|
---|
153 | // = 29 Fit to a Shifted Pareto distribution
|
---|
154 | // = 30 Fit to a Logistic distribution
|
---|
155 | // = 31 Fit to a Beta distribution - [0, 1] interval
|
---|
156 | // = 32 Fit to a Beta distribution - [min, max] interval
|
---|
157 | // = 33 Fit to a Three Parameter Gamma distribution
|
---|
158 | // = 34 Fit to a Standard Gamma distribution
|
---|
159 | // = 35 Fit to an Erlang distribution
|
---|
160 | // = 36 Fit to a two parameter log-normal distribution
|
---|
161 | // = 37 Fit to a three parameter log-normal distribution
|
---|
162 | // = 38 Fit to a Gaussian distribution [allows fixed p-arameters] (see also 4 above)
|
---|
163 | // = 39 Fit to a EC50 dose response curve
|
---|
164 | // = 40 Fit to a LogEC50 dose response curve
|
---|
165 | // = 41 Fit to a EC50 dose response curve - bottom constrained
|
---|
166 | // = 42 Fit to a LogEC50 dose response curve- bottom constrained
|
---|
167 | // = 43 Fit to a simple exponential, A.exp(Bx)
|
---|
168 | // = 44 Fit to multiple exponentials
|
---|
169 | // = 45 Fit to a A(1 - exp(Bx))
|
---|
170 | // = 46 Fit to a constant
|
---|
171 | // = 47 Linear fit with fixed intercept
|
---|
172 | // = 48 Polynomial fit with a fixed intercept
|
---|
173 | // = 49 Multiple Gaussians
|
---|
174 | // = 50 Non-integer polynomial
|
---|
175 |
|
---|
176 | protected boolean bestPolyFlag = false; // = true if bestPolynomial called
|
---|
177 | protected int bestPolynomialDegree = 0; // degree of best polynomial fit
|
---|
178 | protected double fProbSignificance = 0.05; // significance level used in F-test in bestPolynomial method
|
---|
179 | protected ArrayList<Object> bestPolyArray = new ArrayList<Object>(); // array storing history of bestPolynomial search pathway
|
---|
180 |
|
---|
181 | protected boolean userSupplied = true; // = true - user supplies the initial estimates for non-linear regression
|
---|
182 | // = false - the initial estimates for non-linear regression are calculated internally
|
---|
183 |
|
---|
184 | protected double kayValue = 0.0D; // rate parameter value in Erlang distribution (method 35)
|
---|
185 |
|
---|
186 | protected boolean frechetWeibull = true; // Frechet Weibull switch - if true Frechet, if false Weibull
|
---|
187 | protected boolean linNonLin = true; // if true linear method, if false non-linear method
|
---|
188 | protected boolean trueFreq = false; // true if xData values are true frequencies, e.g. in a fit to Gaussian
|
---|
189 | // false if not
|
---|
190 | // if true chiSquarePoisson (see above) is also calculated
|
---|
191 | protected String xLegend = "x axis values"; // x axis legend in X-Y plot
|
---|
192 | protected String yLegend = "y axis values"; // y axis legend in X-Y plot
|
---|
193 | protected String graphTitle = " "; // user supplied graph title
|
---|
194 | protected String graphTitle2 = " "; // second line graph title
|
---|
195 | protected boolean legendCheck = false; // = true if above legends overwritten by user supplied legends
|
---|
196 | protected boolean supressPrint = false; // = true if print results is to be supressed
|
---|
197 | protected boolean supressYYplot= false; // = true if plot of experimental versus calculated is to be supressed
|
---|
198 | protected boolean supressErrorMessages= false; // = true if some designated error messages are to be supressed
|
---|
199 |
|
---|
200 | // Non-linear members
|
---|
201 | protected boolean nlrStatus=true; // Status of non-linear regression on exiting regression method
|
---|
202 | // = true - convergence criterion was met
|
---|
203 | // = false - convergence criterion not met - current estimates returned
|
---|
204 | protected int scaleOpt=0; // if = 0; no scaling of initial estimates
|
---|
205 | // if = 1; initial simplex estimates scaled to unity
|
---|
206 | // if = 2; initial estimates scaled by user provided values in scale[]
|
---|
207 | // (default = 0)
|
---|
208 | protected double[] scale = null; // values to scale initial estimate (see scaleOpt above)
|
---|
209 | protected boolean zeroCheck = false; // true if any best estimate value is zero
|
---|
210 | // if true the scale factor replaces the best estimate in numerical differentiation
|
---|
211 | protected boolean penalty = false; // true if single parameter penalty function is included
|
---|
212 | protected boolean sumPenalty = false; // true if multiple parameter penalty function is included
|
---|
213 | protected int nConstraints = 0; // number of single parameter constraints
|
---|
214 | protected int nSumConstraints = 0; // number of multiple parameter constraints
|
---|
215 | protected int maxConstraintIndex = -1; // maximum index of constrained parameter/s
|
---|
216 | protected double constraintTolerance = 1e-4; // tolerance in constraining parameter/s to a fixed value
|
---|
217 | protected ArrayList<Object> penalties = new ArrayList<Object>(); // constrant method index,
|
---|
218 | // number of single parameter constraints,
|
---|
219 | // then repeated for each constraint:
|
---|
220 | // penalty parameter index,
|
---|
221 | // below or above constraint flag,
|
---|
222 | // constraint boundary value
|
---|
223 | protected ArrayList<Object> sumPenalties = new ArrayList<Object>(); // constraint method index,
|
---|
224 | // number of multiple parameter constraints,
|
---|
225 | // then repeated for each constraint:
|
---|
226 | // number of parameters in summation
|
---|
227 | // penalty parameter indices,
|
---|
228 | // summation signs
|
---|
229 | // below or above constraint flag,
|
---|
230 | // constraint boundary value
|
---|
231 | protected int[] penaltyCheck = null; // = -1 values below the single constraint boundary not allowed
|
---|
232 | // = +1 values above the single constraint boundary not allowed
|
---|
233 | protected int[] sumPenaltyCheck = null; // = -1 values below the multiple constraint boundary not allowed
|
---|
234 | // = +1 values above the multiple constraint boundary not allowed
|
---|
235 | protected double penaltyWeight = 1.0e30; // weight for the penalty functions
|
---|
236 | protected int[] penaltyParam = null; // indices of paramaters subject to single parameter constraint
|
---|
237 | protected int[][] sumPenaltyParam = null; // indices of paramaters subject to multiple parameter constraint
|
---|
238 | protected double[][] sumPlusOrMinus = null; // valueall before each parameter in multiple parameter summation
|
---|
239 | protected int[] sumPenaltyNumber = null; // number of paramaters in each multiple parameter constraint
|
---|
240 |
|
---|
241 | protected double[] constraints = null; // single parameter constraint values
|
---|
242 | protected double[] sumConstraints = null; // multiple parameter constraint values
|
---|
243 | protected int constraintMethod = 0; // constraint method number
|
---|
244 | // =0: cliff to the power two (only method at present)
|
---|
245 |
|
---|
246 | protected boolean scaleFlag = true; // if true ordinate scale factor, Ao, included as unknown in fitting to special functions
|
---|
247 | // if false Ao set to unity (default value) or user provided value (in yScaleFactor)
|
---|
248 | protected double yScaleFactor = 1.0D; // y axis factor - set if scaleFlag (above) = false
|
---|
249 | protected int nMax = 3000; // Nelder and Mead simplex maximum number of iterations
|
---|
250 | protected int nIter = 0; // Nelder and Mead simplex number of iterations performed
|
---|
251 | protected int konvge = 3; // Nelder and Mead simplex number of restarts allowed
|
---|
252 | protected int kRestart = 0; // Nelder and Mead simplex number of restarts taken
|
---|
253 | protected double fMin = -1.0D; // Nelder and Mead simplex minimum value
|
---|
254 | protected double fTol = 1e-9; // Nelder and Mead simplex convergence tolerance
|
---|
255 | protected double rCoeff = 1.0D; // Nelder and Mead simplex reflection coefficient
|
---|
256 | protected double eCoeff = 2.0D; // Nelder and Mead simplex extension coefficient
|
---|
257 | protected double cCoeff = 0.5D; // Nelder and Mead simplex contraction coefficient
|
---|
258 | protected double[] startH = null; // Nelder and Mead simplex unscaled initial estimates
|
---|
259 | protected double[] stepH = null; // Nelder and Mead simplex unscaled initial step values
|
---|
260 | protected double[] startSH = null; // Nelder and Mead simplex scaled initial estimates
|
---|
261 | protected double[] stepSH = null; // Nelder and Mead simplex scaled initial step values
|
---|
262 | protected double dStep = 0.5D; // Nelder and Mead simplex default step value
|
---|
263 | protected double[][] grad = null; // Non-linear regression gradients
|
---|
264 | protected double delta = 1e-4; // Fractional step in numerical differentiation
|
---|
265 | protected double deltaBeale = 1e-3; // Fractional step in calculation of Beale's nonlinearity
|
---|
266 |
|
---|
267 | protected boolean invertFlag=true; // Hessian Matrix ('linear' non-linear statistics) check
|
---|
268 | // true matrix successfully inverted, false inversion failed
|
---|
269 | protected boolean posVarFlag=true; // Hessian Matrix ('linear' non-linear statistics) check
|
---|
270 | // true - all variances are positive; false - at least one is negative
|
---|
271 | protected int minTest = 0; // Nelder and Mead minimum test
|
---|
272 | // = 0; tests simplex sd < fTol
|
---|
273 | // = 1; tests reduced chi suare or sum of squares < mean of abs(y values)*fTol
|
---|
274 | protected double simplexSd = 0.0D; // simplex standard deviation
|
---|
275 | protected boolean statFlag = true; // if true - statistical method called
|
---|
276 | // if false - no statistical analysis
|
---|
277 | protected boolean plotOpt = true; // if true - plot of calculated values is cubic spline interpolation between the calculated values
|
---|
278 | // if false - calculated values linked by straight lines (accomodates Poiwsson distribution plots)
|
---|
279 | protected boolean multipleY = false; // = true if y variable consists of more than set of data each needing a different calculation in RegressionFunction
|
---|
280 | // when set to true - the index of the y value is passed to the function in Regression function
|
---|
281 |
|
---|
282 | protected boolean ignoreDofFcheck = false; // when set to true, the check on whether degrees of freedom are greater than zero is ignored
|
---|
283 |
|
---|
284 | protected double[] values = null; // values entered into gaussianFixed
|
---|
285 | protected boolean[] fixed = null; // true if above values[i] is fixed, false if it is not
|
---|
286 |
|
---|
287 | protected int nGaussians = 0; // Number of Gaussian distributions in multiple Gaussian fitting
|
---|
288 | protected double[] multGaussFract = null; // Best estimates of multiple Gaussian fractional contributions
|
---|
289 | protected double[] multGaussFractErrors = null; // Errors in the estmated of multiple Gaussian fractional contributions
|
---|
290 | protected double[] multGaussCoeffVar = null; // Coefficients of variation of multiple Gaussian fractional contributions
|
---|
291 | protected double[] multGaussTvalue = null; // t-values for multiple Gaussian fractional contributions
|
---|
292 | protected double[] multGaussPvalue = null; // p-values for multiple Gaussian fractional contributions
|
---|
293 | protected double multGaussScale = 1.0; // Scale factor for multiple Gaussian fractional contributions
|
---|
294 | protected double multGaussScaleError = 0.0; // error in the scale factor for multiple Gaussian fractional contributions
|
---|
295 | protected double multGaussScaleCoeffVar = 0.0; // coeff. of var. in the scale factor for multiple Gaussian fractional contributions
|
---|
296 | protected double multGaussScaleTvalue = 0.0; // t-value of the scale factor for multiple Gaussian fractional contributions
|
---|
297 | protected double multGaussScalePvalue = 0.0; // p-value of the scale factor for multiple Gaussian fractional contributions
|
---|
298 |
|
---|
299 | protected boolean plotWindowCloseChoice = false;// if false: closing window terminates program
|
---|
300 | // if true: closing window does not terminate program
|
---|
301 |
|
---|
302 | protected double bottom = 0.0; // bottom value of sigmoid curves
|
---|
303 | protected double bottomIndex = 0.0; // index of the bottom value of a sigmoid curve
|
---|
304 | protected double top = 0.0; // top value of a sigmoid curve
|
---|
305 | protected double topIndex = 0.0; // index of the top value of a sigmoid curve
|
---|
306 | protected int midPointIndex = 0; // index of the mid point of a sigmoid curve
|
---|
307 | protected double midPointXvalue = 0.0; // x-value of the mid point of a sigmoid curve
|
---|
308 | protected double midPointYvalue = 0.0; // y-value of the mid point of a sigmoid curve
|
---|
309 | protected int directionFlag = 0; // = 1, gradient of a sigmoid curve is positive
|
---|
310 | // = -1, gradient of a sigmoid curve is negative
|
---|
311 |
|
---|
312 | // HISTOGRAM CONSTRUCTION
|
---|
313 | // Tolerance used in including an upper point in last histogram bin when it is outside due to riunding erors
|
---|
314 | protected static double histTol = 1.0001D;
|
---|
315 |
|
---|
316 | //CONSRUCTORS
|
---|
317 |
|
---|
318 | // Default constructor - primarily facilitating the subclass ImpedSpecRegression
|
---|
319 | public Regression(){
|
---|
320 | }
|
---|
321 |
|
---|
322 | // Constructor with data with x as 2D array and weights provided
|
---|
323 | public Regression(double[][] xData, double[] yData, double[] weight){
|
---|
324 |
|
---|
325 | int n=weight.length;
|
---|
326 | this.nData0 = yData.length;
|
---|
327 | weight = this.checkForZeroWeights(weight);
|
---|
328 | if(this.weightOpt)this.weightFlag = 1;
|
---|
329 | this.setDefaultValues(Conv.copy(xData), Conv.copy(yData), Conv.copy(weight));
|
---|
330 | }
|
---|
331 |
|
---|
332 | // Constructor with data with x and y as 2D arrays and weights provided
|
---|
333 | public Regression(double[][] xxData, double[][] yyData, double[][] wWeight){
|
---|
334 | this.multipleY = true;
|
---|
335 | int nY1 = yyData.length;
|
---|
336 | this.nYarrays = nY1;
|
---|
337 | int nY2 = yyData[0].length;
|
---|
338 | this.nData0 = nY2;
|
---|
339 | int nX1 = xxData.length;
|
---|
340 | int nX2 = xxData[0].length;
|
---|
341 | double[] yData = new double[nY1*nY2];
|
---|
342 | double[] weight = new double[nY1*nY2];
|
---|
343 | double[][] xData = new double[nY1*nY2][nX1];
|
---|
344 | int ii=0;
|
---|
345 | for(int i=0; i<nY1; i++){
|
---|
346 | int nY = yyData[i].length;
|
---|
347 | if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
|
---|
348 | int nX = xxData[i].length;
|
---|
349 | if(nY!=nX)throw new IllegalArgumentException("multiple y arrays must be of the same length as the x array length");
|
---|
350 | for(int j=0; j<nY2; j++){
|
---|
351 | yData[ii] = yyData[i][j];
|
---|
352 | xData[ii][i] = xxData[i][j];
|
---|
353 | weight[ii] = wWeight[i][j];
|
---|
354 | ii++;
|
---|
355 | }
|
---|
356 | }
|
---|
357 | weight = this.checkForZeroWeights(weight);
|
---|
358 | if(this.weightOpt)this.weightFlag = 1;
|
---|
359 | this.setDefaultValues(xData, yData, weight);
|
---|
360 | }
|
---|
361 |
|
---|
362 | // Constructor with data with x as 1D array and weights provided
|
---|
363 | public Regression(double[] xxData, double[] yData, double[] weight){
|
---|
364 | this.nData0 = yData.length;
|
---|
365 | int n = xxData.length;
|
---|
366 | int m = weight.length;
|
---|
367 | double[][] xData = new double[1][n];
|
---|
368 | for(int i=0; i<n; i++){
|
---|
369 | xData[0][i]=xxData[i];
|
---|
370 | }
|
---|
371 |
|
---|
372 | weight = this.checkForZeroWeights(weight);
|
---|
373 | if(this.weightOpt)this.weightFlag = 1;
|
---|
374 | this.setDefaultValues(Conv.copy(xData), Conv.copy(yData), Conv.copy(weight));
|
---|
375 | }
|
---|
376 |
|
---|
377 | // Constructor with data with x as 1D array and y as 2D array and weights provided
|
---|
378 | public Regression(double[] xxData, double[][] yyData, double[][] wWeight){
|
---|
379 |
|
---|
380 | this.multipleY = true;
|
---|
381 | int nY1 = yyData.length;
|
---|
382 | this.nYarrays = nY1;
|
---|
383 | int nY2= yyData[0].length;
|
---|
384 | this.nData0 = nY2;
|
---|
385 | double[] yData = new double[nY1*nY2];
|
---|
386 | double[] weight = new double[nY1*nY2];
|
---|
387 | int ii=0;
|
---|
388 | for(int i=0; i<nY1; i++){
|
---|
389 | int nY = yyData[i].length;
|
---|
390 | if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
|
---|
391 | for(int j=0; j<nY2; j++){
|
---|
392 | yData[ii] = yyData[i][j];
|
---|
393 | weight[ii] = wWeight[i][j];
|
---|
394 | ii++;
|
---|
395 | }
|
---|
396 | }
|
---|
397 | int n = xxData.length;
|
---|
398 | if(n!=nY2)throw new IllegalArgumentException("x and y data lengths must be the same");
|
---|
399 | double[][] xData = new double[1][nY1*n];
|
---|
400 | ii=0;
|
---|
401 | for(int j=0; j<nY1; j++){
|
---|
402 | for(int i=0; i<n; i++){
|
---|
403 | xData[0][ii]=xxData[i];
|
---|
404 | ii++;
|
---|
405 | }
|
---|
406 | }
|
---|
407 |
|
---|
408 | weight = this.checkForZeroWeights(weight);
|
---|
409 | if(this.weightOpt)this.weightFlag = 1;
|
---|
410 | this.setDefaultValues(xData, yData, weight);
|
---|
411 | }
|
---|
412 |
|
---|
413 | // Constructor with data with x as 2D array and no weights provided
|
---|
414 | public Regression(double[][] xData, double[] yData){
|
---|
415 | this.nData0 = yData.length;
|
---|
416 | int n = yData.length;
|
---|
417 | double[] weight = new double[n];
|
---|
418 |
|
---|
419 | this.weightOpt=false;
|
---|
420 | this.weightFlag = 0;
|
---|
421 | for(int i=0; i<n; i++)weight[i]=1.0D;
|
---|
422 |
|
---|
423 | setDefaultValues(Conv.copy(xData), Conv.copy(yData), weight);
|
---|
424 | }
|
---|
425 |
|
---|
426 | // Constructor with data with x and y as 2D arrays and no weights provided
|
---|
427 | public Regression(double[][] xxData, double[][] yyData){
|
---|
428 | this.multipleY = true;
|
---|
429 | int nY1 = yyData.length;
|
---|
430 | this.nYarrays = nY1;
|
---|
431 | int nY2 = yyData[0].length;
|
---|
432 | this.nData0 = nY2;
|
---|
433 | int nX1 = xxData.length;
|
---|
434 | int nX2 = xxData[0].length;
|
---|
435 | double[] yData = new double[nY1*nY2];
|
---|
436 | if(nY1!=nX1)throw new IllegalArgumentException("Multiple xData and yData arrays of different overall dimensions not supported");
|
---|
437 | double[][] xData = new double[1][nY1*nY2];
|
---|
438 | int ii=0;
|
---|
439 | for(int i=0; i<nY1; i++){
|
---|
440 | int nY = yyData[i].length;
|
---|
441 | if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
|
---|
442 | int nX = xxData[i].length;
|
---|
443 | if(nY!=nX)throw new IllegalArgumentException("multiple y arrays must be of the same length as the x array length");
|
---|
444 | for(int j=0; j<nY2; j++){
|
---|
445 | yData[ii] = yyData[i][j];
|
---|
446 | xData[0][ii] = xxData[i][j];
|
---|
447 | ii++;
|
---|
448 | }
|
---|
449 | }
|
---|
450 |
|
---|
451 | int n = yData.length;
|
---|
452 | double[] weight = new double[n];
|
---|
453 |
|
---|
454 | this.weightOpt=false;
|
---|
455 | for(int i=0; i<n; i++)weight[i]=1.0D;
|
---|
456 | this.weightFlag = 0;
|
---|
457 |
|
---|
458 | setDefaultValues(xData, yData, weight);
|
---|
459 | }
|
---|
460 |
|
---|
461 | // Constructor with data with x as 1D array and no weights provided
|
---|
462 | public Regression(double[] xxData, double[] yData){
|
---|
463 | this.nData0 = yData.length;
|
---|
464 | int n = xxData.length;
|
---|
465 | double[][] xData = new double[1][n];
|
---|
466 | double[] weight = new double[n];
|
---|
467 |
|
---|
468 | for(int i=0; i<n; i++)xData[0][i]=xxData[i];
|
---|
469 |
|
---|
470 | this.weightOpt=false;
|
---|
471 | this.weightFlag = 0;
|
---|
472 | for(int i=0; i<n; i++)weight[i]=1.0D;
|
---|
473 |
|
---|
474 | setDefaultValues(xData, Conv.copy(yData), weight);
|
---|
475 | }
|
---|
476 |
|
---|
477 | // Constructor with data with x as 1D array and y as a 2D array and no weights provided
|
---|
478 | public Regression(double[] xxData, double[][] yyData){
|
---|
479 | this.multipleY = true;
|
---|
480 | int nY1 = yyData.length;
|
---|
481 | this.nYarrays = nY1;
|
---|
482 | int nY2= yyData[0].length;
|
---|
483 | this.nData0 = nY2;
|
---|
484 | double[] yData = new double[nY1*nY2];
|
---|
485 | int ii=0;
|
---|
486 | for(int i=0; i<nY1; i++){
|
---|
487 | int nY = yyData[i].length;
|
---|
488 | if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
|
---|
489 | for(int j=0; j<nY2; j++){
|
---|
490 | yData[ii] = yyData[i][j];
|
---|
491 | ii++;
|
---|
492 | }
|
---|
493 | }
|
---|
494 |
|
---|
495 | double[][] xData = new double[1][nY1*nY2];
|
---|
496 | double[] weight = new double[nY1*nY2];
|
---|
497 |
|
---|
498 | ii=0;
|
---|
499 | int n = xxData.length;
|
---|
500 | for(int j=0; j<nY1; j++){
|
---|
501 | for(int i=0; i<n; i++){
|
---|
502 | xData[0][ii]=xxData[i];
|
---|
503 | weight[ii]=1.0D;
|
---|
504 | ii++;
|
---|
505 | }
|
---|
506 | }
|
---|
507 | this.weightOpt=false;
|
---|
508 | this.weightFlag = 0;
|
---|
509 |
|
---|
510 | setDefaultValues(xData, yData, weight);
|
---|
511 | }
|
---|
512 |
|
---|
513 | // Constructor with data as a single array that has to be binned
|
---|
514 | // bin width and value of the low point of the first bin provided
|
---|
515 | public Regression(double[] xxData, double binWidth, double binZero){
|
---|
516 | double[][] data = Regression.histogramBins(Conv.copy(xxData), binWidth, binZero);
|
---|
517 | int n = data[0].length;
|
---|
518 | this.nData0 = n;
|
---|
519 | double[][] xData = new double[1][n];
|
---|
520 | double[] yData = new double[n];
|
---|
521 | double[] weight = new double[n];
|
---|
522 | for(int i=0; i<n; i++){
|
---|
523 | xData[0][i]=data[0][i];
|
---|
524 | yData[i]=data[1][i];
|
---|
525 | }
|
---|
526 | boolean flag = setTrueFreqWeights(yData, weight);
|
---|
527 | if(flag){
|
---|
528 | this.trueFreq=true;
|
---|
529 | this.weightOpt=true;
|
---|
530 | this.weightFlag = 1;
|
---|
531 | }
|
---|
532 | else{
|
---|
533 | this.trueFreq=false;
|
---|
534 | this.weightOpt=false;
|
---|
535 | this.weightFlag = 0;
|
---|
536 | }
|
---|
537 | setDefaultValues(xData, yData, weight);
|
---|
538 | }
|
---|
539 |
|
---|
540 | // Constructor with data as a single array that has to be binned
|
---|
541 | // bin width provided
|
---|
542 | public Regression(double[] xxData, double binWidth){
|
---|
543 | double[][] data = Regression.histogramBins(Conv.copy(xxData), binWidth);
|
---|
544 | int n = data[0].length;
|
---|
545 | this.nData0 = n;
|
---|
546 | double[][] xData = new double[1][n];
|
---|
547 | double[] yData = new double[n];
|
---|
548 | double[] weight = new double[n];
|
---|
549 | for(int i=0; i<n; i++){
|
---|
550 | xData[0][i]=data[0][i];
|
---|
551 | yData[i]=data[1][i];
|
---|
552 | }
|
---|
553 | boolean flag = setTrueFreqWeights(yData, weight);
|
---|
554 | if(flag){
|
---|
555 | this.trueFreq=true;
|
---|
556 | this.weightOpt=true;
|
---|
557 | this.weightFlag = 1;
|
---|
558 | }
|
---|
559 | else{
|
---|
560 | this.trueFreq=false;
|
---|
561 | this.weightOpt=false;
|
---|
562 | this.weightFlag = 0;
|
---|
563 | }
|
---|
564 | System.out.println("sf1 " + scaleFlag);
|
---|
565 | setDefaultValues(xData, yData, weight);
|
---|
566 | System.out.println("sf2 " + scaleFlag);
|
---|
567 | }
|
---|
568 |
|
---|
569 | // Check entered weights for zeros.
|
---|
570 | // If more than 40% are zero or less than zero, all weights replaced by unity
|
---|
571 | // If less than 40% are zero or less than zero, the zero or negative weights are replaced by the average of their nearest neighbours
|
---|
572 | protected double[] checkForZeroWeights(double[] weight){
|
---|
573 | this.weightOpt=true;
|
---|
574 | int nZeros = 0;
|
---|
575 | int n=weight.length;
|
---|
576 |
|
---|
577 | for(int i=0; i<n; i++)if(weight[i]<=0.0)nZeros++;
|
---|
578 | double perCentZeros = 100.0*(double)nZeros/(double)n;
|
---|
579 | if(perCentZeros>40.0){
|
---|
580 | System.out.println(perCentZeros + "% of the weights are zero or less; all weights set to 1.0");
|
---|
581 | for(int i=0; i<n; i++)weight[i]=1.0D;
|
---|
582 | this.weightOpt = false;
|
---|
583 | }
|
---|
584 | else{
|
---|
585 | if(perCentZeros>0.0D){
|
---|
586 | for(int i=0; i<n; i++){
|
---|
587 | if(weight[i]<=0.0){
|
---|
588 | if(i==0){
|
---|
589 | int ii=1;
|
---|
590 | boolean test = true;
|
---|
591 | while(test){
|
---|
592 | if(weight[ii]>0.0D){
|
---|
593 | double ww = weight[0];
|
---|
594 | weight[0] = weight[ii];
|
---|
595 | System.out.println("weight at point " + i + ", " + ww + ", replaced by "+ weight[i]);
|
---|
596 | test = false;
|
---|
597 | }
|
---|
598 | else{
|
---|
599 | ii++;
|
---|
600 | }
|
---|
601 | }
|
---|
602 | }
|
---|
603 | if(i==(n-1)){
|
---|
604 | int ii=n-2;
|
---|
605 | boolean test = true;
|
---|
606 | while(test){
|
---|
607 | if(weight[ii]>0.0D){
|
---|
608 | double ww = weight[i];
|
---|
609 | weight[i] = weight[ii];
|
---|
610 | System.out.println("weight at point " + i + ", " + ww + ", replaced by "+ weight[i]);
|
---|
611 | test = false;
|
---|
612 | }
|
---|
613 | else{
|
---|
614 | ii--;
|
---|
615 | }
|
---|
616 | }
|
---|
617 | }
|
---|
618 | if(i>0 && i<(n-2)){
|
---|
619 | double lower = 0.0;
|
---|
620 | double upper = 0.0;
|
---|
621 | int ii=i-1;
|
---|
622 | boolean test = true;
|
---|
623 | while(test){
|
---|
624 | if(weight[ii]>0.0D){
|
---|
625 | lower = weight[ii];
|
---|
626 | test = false;
|
---|
627 | }
|
---|
628 | else{
|
---|
629 | ii--;
|
---|
630 | if(ii==0)test = false;
|
---|
631 | }
|
---|
632 | }
|
---|
633 | ii=i+1;
|
---|
634 | test = true;
|
---|
635 | while(test){
|
---|
636 | if(weight[ii]>0.0D){
|
---|
637 | upper = weight[ii];
|
---|
638 | test = false;
|
---|
639 | }
|
---|
640 | else{
|
---|
641 | ii++;
|
---|
642 | if(ii==(n-1))test = false;
|
---|
643 | }
|
---|
644 | }
|
---|
645 | double ww = weight[i];
|
---|
646 | if(lower==0.0){
|
---|
647 | weight[i] = upper;
|
---|
648 | }
|
---|
649 | else{
|
---|
650 | if(upper==0.0){
|
---|
651 | weight[i] = lower;
|
---|
652 | }
|
---|
653 | else{
|
---|
654 | weight[i] = (lower + upper)/2.0;
|
---|
655 | }
|
---|
656 | }
|
---|
657 | System.out.println("weight at point " + i + ", " + ww + ", replaced by "+ weight[i]);
|
---|
658 | }
|
---|
659 | }
|
---|
660 | }
|
---|
661 | }
|
---|
662 | }
|
---|
663 | return weight;
|
---|
664 | }
|
---|
665 |
|
---|
666 | // Enter data methods
|
---|
667 | // Enter data with x as 2D array and weights provided
|
---|
668 | public void enterData(double[][] xData, double[] yData, double[] weight){
|
---|
669 |
|
---|
670 | int n=weight.length;
|
---|
671 | this.nData0 = yData.length;
|
---|
672 | this.weightOpt=true;
|
---|
673 | weight = this.checkForZeroWeights(weight);
|
---|
674 | if(this.weightOpt)this.weightFlag = 1;
|
---|
675 | this.setDefaultValues(xData, yData, weight);
|
---|
676 | }
|
---|
677 |
|
---|
// Enter data with x and y as 2D arrays and weights provided.
// Each row of yyData is a separate y data set; all rows must have the same length,
// which must also equal the length of the corresponding xxData row.
public void enterData(double[][] xxData, double[][] yyData, double[][] wWeight){
    this.multipleY = true;
    int nY1 = yyData.length;        // number of y data sets
    this.nYarrays = nY1;
    int nY2 = yyData[0].length;     // points per data set
    this.nData0 = nY2;
    int nX1 = xxData.length;
    int nX2 = xxData[0].length;     // NOTE(review): nX2 is never used — candidate for removal
    double[] yData = new double[nY1*nY2];
    double[] weight = new double[nY1*nY2];
    double[][] xData = new double[nY1*nY2][nX1];
    int ii=0;
    for(int i=0; i<nY1; i++){
        int nY = yyData[i].length;
        if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
        int nX = xxData[i].length;
        if(nY!=nX)throw new IllegalArgumentException("multiple y arrays must be of the same length as the x array length");
        for(int j=0; j<nY2; j++){
            yData[ii] = yyData[i][j];
            // NOTE(review): the second index here is i (the data-set index), not j; only one
            // column per flattened row is ever written. This matches the unweighted 2D/2D
            // enterData in this file, but looks suspicious — confirm the intended flattened
            // layout of xData before changing.
            xData[ii][i] = xxData[i][j];
            weight[ii] = wWeight[i][j];
            ii++;
        }
    }

    // repair zero/negative weights; may reset weightOpt if too many are bad
    weight = this.checkForZeroWeights(weight);
    if(this.weightOpt)this.weightFlag = 1;
    this.setDefaultValues(xData, yData, weight);
}
|
---|
708 |
|
---|
709 | // Enter data with x as 1D array and weights provided
|
---|
710 | public void enterData(double[] xxData, double[] yData, double[] weight){
|
---|
711 | this.nData0 = yData.length;
|
---|
712 | int n = xxData.length;
|
---|
713 | int m = weight.length;
|
---|
714 | double[][] xData = new double[1][n];
|
---|
715 | for(int i=0; i<n; i++){
|
---|
716 | xData[0][i]=xxData[i];
|
---|
717 | }
|
---|
718 |
|
---|
719 | weight = this.checkForZeroWeights(weight);
|
---|
720 | if(this.weightOpt)this.weightFlag = 1;
|
---|
721 | this.setDefaultValues(xData, yData, weight);
|
---|
722 | }
|
---|
723 |
|
---|
724 | // Enter data with x as 1D array and y as 2D array and weights provided
|
---|
725 | public void enterData(double[] xxData, double[][] yyData, double[][] wWeight){
|
---|
726 |
|
---|
727 | this.multipleY = true;
|
---|
728 | int nY1 = yyData.length;
|
---|
729 | this.nYarrays = nY1;
|
---|
730 | int nY2= yyData[0].length;
|
---|
731 | this.nData0 = nY2;
|
---|
732 | double[] yData = new double[nY1*nY2];
|
---|
733 | double[] weight = new double[nY1*nY2];
|
---|
734 | int ii=0;
|
---|
735 | for(int i=0; i<nY1; i++){
|
---|
736 | int nY = yyData[i].length;
|
---|
737 | if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
|
---|
738 | for(int j=0; j<nY2; j++){
|
---|
739 | yData[ii] = yyData[i][j];
|
---|
740 | weight[ii] = wWeight[i][j];
|
---|
741 | ii++;
|
---|
742 | }
|
---|
743 | }
|
---|
744 | int n = xxData.length;
|
---|
745 | if(n!=nY2)throw new IllegalArgumentException("x and y data lengths must be the same");
|
---|
746 | double[][] xData = new double[1][nY1*n];
|
---|
747 | ii=0;
|
---|
748 | for(int j=0; j<nY1; j++){
|
---|
749 | for(int i=0; i<n; i++){
|
---|
750 | xData[0][ii]=xxData[i];
|
---|
751 | ii++;
|
---|
752 | }
|
---|
753 | }
|
---|
754 |
|
---|
755 | weight = this.checkForZeroWeights(weight);
|
---|
756 | if(this.weightOpt)this.weightFlag = 1;
|
---|
757 | this.setDefaultValues(xData, yData, weight);
|
---|
758 | }
|
---|
759 |
|
---|
760 | // Enter data with x as 2D array and no weights provided
|
---|
761 | public void enterData(double[][] xData, double[] yData){
|
---|
762 | this.nData0 = yData.length;
|
---|
763 | int n = yData.length;
|
---|
764 | double[] weight = new double[n];
|
---|
765 |
|
---|
766 | this.weightOpt=false;
|
---|
767 | for(int i=0; i<n; i++)weight[i]=1.0D;
|
---|
768 | this.weightFlag = 0;
|
---|
769 | setDefaultValues(xData, yData, weight);
|
---|
770 | }
|
---|
771 |
|
---|
// Enter data with x and y as 2D arrays and no weights provided.
// Each row of yyData is a separate y data set; all rows must have the same length,
// which must also equal the length of the corresponding xxData row.
public void enterData(double[][] xxData, double[][] yyData){
    this.multipleY = true;
    int nY1 = yyData.length;        // number of y data sets
    this.nYarrays = nY1;
    int nY2 = yyData[0].length;     // points per data set
    this.nData0 = nY2;
    int nX1 = xxData.length;
    int nX2 = xxData[0].length;     // NOTE(review): nX2 is never used — candidate for removal
    double[] yData = new double[nY1*nY2];
    double[][] xData = new double[nY1*nY2][nX1];
    int ii=0;
    for(int i=0; i<nY1; i++){
        int nY = yyData[i].length;
        if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
        int nX = xxData[i].length;
        if(nY!=nX)throw new IllegalArgumentException("multiple y arrays must be of the same length as the x array length");
        for(int j=0; j<nY2; j++){
            yData[ii] = yyData[i][j];
            // NOTE(review): the second index here is i (the data-set index), not j; only one
            // column per flattened row is ever written. This matches the weighted 2D/2D
            // enterData in this file, but looks suspicious — confirm the intended flattened
            // layout of xData before changing.
            xData[ii][i] = xxData[i][j];
            ii++;
        }
    }

    int n = yData.length;
    double[] weight = new double[n];

    // unit weights: unweighted fit
    this.weightOpt=false;
    for(int i=0; i<n; i++)weight[i]=1.0D;
    this.weightFlag = 0;

    setDefaultValues(xData, yData, weight);
}
|
---|
805 |
|
---|
806 | // Enter data with x as 1D array and no weights provided
|
---|
807 | public void enterData(double[] xxData, double[] yData){
|
---|
808 | this.nData0 = yData.length;
|
---|
809 | int n = xxData.length;
|
---|
810 | double[][] xData = new double[1][n];
|
---|
811 | double[] weight = new double[n];
|
---|
812 |
|
---|
813 | for(int i=0; i<n; i++)xData[0][i]=xxData[i];
|
---|
814 |
|
---|
815 | this.weightOpt=false;
|
---|
816 | for(int i=0; i<n; i++)weight[i]=1.0D;
|
---|
817 | this.weightFlag = 0;
|
---|
818 |
|
---|
819 | setDefaultValues(xData, yData, weight);
|
---|
820 | }
|
---|
821 |
|
---|
822 | // Enter data with x as 1D array and y as a 2D array and no weights provided
|
---|
823 | public void enterData(double[] xxData, double[][] yyData){
|
---|
824 | this.multipleY = true;
|
---|
825 | int nY1 = yyData.length;
|
---|
826 | this.nYarrays = nY1;
|
---|
827 | int nY2= yyData[0].length;
|
---|
828 | this.nData0 = nY2;
|
---|
829 | double[] yData = new double[nY1*nY2];
|
---|
830 | int ii=0;
|
---|
831 | for(int i=0; i<nY1; i++){
|
---|
832 | int nY = yyData[i].length;
|
---|
833 | if(nY!=nY2)throw new IllegalArgumentException("multiple y arrays must be of the same length");
|
---|
834 | for(int j=0; j<nY2; j++){
|
---|
835 | yData[ii] = yyData[i][j];
|
---|
836 | ii++;
|
---|
837 | }
|
---|
838 | }
|
---|
839 |
|
---|
840 | double[][] xData = new double[1][nY1*nY2];
|
---|
841 | double[] weight = new double[nY1*nY2];
|
---|
842 |
|
---|
843 | ii=0;
|
---|
844 | int n = xxData.length;
|
---|
845 | for(int j=0; j<nY1; j++){
|
---|
846 | for(int i=0; i<n; i++){
|
---|
847 | xData[0][ii]=xxData[i];
|
---|
848 | weight[ii]=1.0D;
|
---|
849 | ii++;
|
---|
850 | }
|
---|
851 | }
|
---|
852 | this.weightOpt=false;
|
---|
853 | this.weightFlag = 0;
|
---|
854 |
|
---|
855 | this.setDefaultValues(xData, yData, weight);
|
---|
856 | }
|
---|
857 |
|
---|
858 | // Enter data as a single array that has to be binned
|
---|
859 | // bin width and value of the low point of the first bin provided
|
---|
860 | public void enterData(double[] xxData, double binWidth, double binZero){
|
---|
861 | double[][] data = Regression.histogramBins(xxData, binWidth, binZero);
|
---|
862 | int n = data[0].length;
|
---|
863 | this.nData0 = n;
|
---|
864 | double[][] xData = new double[1][n];
|
---|
865 | double[] yData = new double[n];
|
---|
866 | double[] weight = new double[n];
|
---|
867 | for(int i=0; i<n; i++){
|
---|
868 | xData[0][i]=data[0][i];
|
---|
869 | yData[i]=data[1][i];
|
---|
870 | }
|
---|
871 | boolean flag = setTrueFreqWeights(yData, weight);
|
---|
872 | if(flag){
|
---|
873 | this.trueFreq=true;
|
---|
874 | this.weightOpt=true;
|
---|
875 | this.weightFlag = 1;
|
---|
876 | }
|
---|
877 | else{
|
---|
878 | this.trueFreq=false;
|
---|
879 | this.weightOpt=false;
|
---|
880 | this.weightFlag = 0;
|
---|
881 | }
|
---|
882 | setDefaultValues(xData, yData, weight);
|
---|
883 | }
|
---|
884 |
|
---|
885 | // Enter data as a single array that has to be binned
|
---|
886 | // bin width provided
|
---|
887 | public void enterData(double[] xxData, double binWidth){
|
---|
888 | double[][] data = Regression.histogramBins(xxData, binWidth);
|
---|
889 | int n = data[0].length;
|
---|
890 | this.nData0 = n;
|
---|
891 | double[][] xData = new double[1][n];
|
---|
892 | double[] yData = new double[n];
|
---|
893 | double[] weight = new double[n];
|
---|
894 | for(int i=0; i<n; i++){
|
---|
895 | xData[0][i]=data[0][i];
|
---|
896 | yData[i]=data[1][i];
|
---|
897 | }
|
---|
898 | boolean flag = setTrueFreqWeights(yData, weight);
|
---|
899 | if(flag){
|
---|
900 | this.trueFreq=true;
|
---|
901 | this.weightOpt=true;
|
---|
902 | this.weightFlag = 0;
|
---|
903 | }
|
---|
904 | else{
|
---|
905 | this.trueFreq=false;
|
---|
906 | this.weightOpt=false;
|
---|
907 | this.weightFlag = 0;
|
---|
908 | }
|
---|
909 | setDefaultValues(xData, yData, weight);
|
---|
910 | }
|
---|
911 |
|
---|
912 |
|
---|
913 | protected static boolean setTrueFreqWeights(double[] yData, double[] weight){
|
---|
914 | int nData=yData.length;
|
---|
915 | boolean flag = true;
|
---|
916 | boolean unityWeight=false;
|
---|
917 |
|
---|
918 | // Set all weights to square root of frequency of occurence
|
---|
919 | for(int ii=0; ii<nData; ii++){
|
---|
920 | weight[ii]=Math.sqrt(Math.abs(yData[ii]));
|
---|
921 | }
|
---|
922 |
|
---|
923 | // Check for zero weights and take average of neighbours as weight if it is zero
|
---|
924 | for(int ii=0; ii<nData; ii++){
|
---|
925 | double last = 0.0D;
|
---|
926 | double next = 0.0D;
|
---|
927 | if(weight[ii]==0){
|
---|
928 | // find previous non-zero value
|
---|
929 | boolean testLast = true;
|
---|
930 | int iLast = ii - 1;
|
---|
931 | while(testLast){
|
---|
932 | if(iLast<0){
|
---|
933 | testLast = false;
|
---|
934 | }
|
---|
935 | else{
|
---|
936 | if(weight[iLast]==0.0D){
|
---|
937 | iLast--;
|
---|
938 | }
|
---|
939 | else{
|
---|
940 | last = weight[iLast];
|
---|
941 | testLast = false;
|
---|
942 | }
|
---|
943 | }
|
---|
944 | }
|
---|
945 |
|
---|
946 | // find next non-zero value
|
---|
947 | boolean testNext = true;
|
---|
948 | int iNext = ii + 1;
|
---|
949 | while(testNext){
|
---|
950 | if(iNext>=nData){
|
---|
951 | testNext = false;
|
---|
952 | }
|
---|
953 | else{
|
---|
954 | if(weight[iNext]==0.0D){
|
---|
955 | iNext++;
|
---|
956 | }
|
---|
957 | else{
|
---|
958 | next = weight[iNext];
|
---|
959 | testNext = false;
|
---|
960 | }
|
---|
961 | }
|
---|
962 | }
|
---|
963 |
|
---|
964 | // Take average
|
---|
965 | weight[ii]=(last + next)/2.0D;
|
---|
966 | }
|
---|
967 | }
|
---|
968 | return flag;
|
---|
969 | }
|
---|
970 |
|
---|
971 | // Set data and default values
|
---|
972 | protected void setDefaultValues(double[][] xData, double[] yData, double[] weight){
|
---|
973 | this.nData = yData.length;
|
---|
974 | this.nXarrays = xData.length;
|
---|
975 | this.nTerms = this.nXarrays;
|
---|
976 | this.yData = new double[nData];
|
---|
977 | this.yCalc = new double[nData];
|
---|
978 | this.weight = new double[nData];
|
---|
979 | this.residual = new double[nData];
|
---|
980 | this.residualW = new double[nData];
|
---|
981 | this.xData = new double[nXarrays][nData];
|
---|
982 | int n=weight.length;
|
---|
983 | if(n!=this.nData)throw new IllegalArgumentException("The weight and the y data lengths do not agree");
|
---|
984 | for(int i=0; i<this.nData; i++){
|
---|
985 | this.yData[i]=yData[i];
|
---|
986 | this.weight[i]=weight[i];
|
---|
987 | }
|
---|
988 | for(int j=0; j<this.nXarrays; j++){
|
---|
989 | n=xData[j].length;
|
---|
990 | if(n!=this.nData)throw new IllegalArgumentException("An x [" + j + "] length " + n + " and the y data length, " + this.nData + ", do not agree");
|
---|
991 | for(int i=0; i<this.nData; i++){
|
---|
992 | this.xData[j][i]=xData[j][i];
|
---|
993 | }
|
---|
994 | }
|
---|
995 | }
|
---|
996 |
|
---|
// Set standard deviation, variance and covariance denominators to n.
// Delegates to Stat; affects subsequent static Stat calculations globally.
public static void setDenominatorToN(){
    Stat.setStaticDenominatorToN();
}
|
---|
1001 |
|
---|
// Set standard deviation, variance and covariance denominators to n-1
// (comment fixed: it previously said "to n", contradicting the method).
// Delegates to Stat; affects subsequent static Stat calculations globally.
public static void setDenominatorToNminusOne(){
    Stat.setStaticDenominatorToNminusOne();
}
|
---|
1006 |
|
---|
// Reset value of cfMaxIter used in the contFract method in Stat called by the
// regularised incomplete beta function methods in Stat.
// These are called from Regression, e.g. in the calculation of p-Values.
public static void resetCFmaxIter(int cfMaxIter){
    Stat.resetCFmaxIter(cfMaxIter);
}
|
---|
1012 |
|
---|
// Get value of cfMaxIter used in the contFract method in Stat called by the
// regularised incomplete beta function methods in Stat.
// These are called from Regression, e.g. in the calculation of p-Values.
public static int getCFmaxIter(){
    return Stat.getCFmaxIter();
}
|
---|
1018 |
|
---|
// Reset value of cfTol (convergence tolerance) used in the contFract method in Stat
// called by the regularised incomplete beta function methods in Stat.
// These are called from Regression, e.g. in the calculation of p-Values.
public static void resetCFtolerance(double cfTol){
    Stat.resetCFtolerance(cfTol);
}
|
---|
1024 |
|
---|
// Get value of cfTol (convergence tolerance) used in the contFract method in Stat
// called by the regularised incomplete beta function methods in Stat.
// These are called from Regression, e.g. in the calculation of p-Values.
public static double getCFtolerance(){
    return Stat.getCFtolerance();
}
|
---|
1030 |
|
---|
// Suppress printing of results (historic "supress" spelling retained in the API name).
public void supressPrint(){
    this.supressPrint = true;
}
|
---|
1035 |
|
---|
// Suppress plot of calculated versus experimental values
// (historic "supress" spelling retained in the API name).
public void supressYYplot(){
    this.supressYYplot = true;
}
|
---|
1040 |
|
---|
// Suppress convergence and chiSquare error messages
// (historic "supress" spelling retained in the API name).
public void supressErrorMessages(){
    this.supressErrorMessages = true;
}
|
---|
1045 |
|
---|
// Ignore the check on whether degrees of freedom are greater than zero
// (typo "degrtees" fixed in this comment).
public void ignoreDofFcheck(){
    this.ignoreDofFcheck = true;
}
|
---|
1050 |
|
---|
// Suppress the statistical analysis (historic "supress" spelling retained in the API name).
public void supressStats(){
    this.statFlag = false;
}
|
---|
1055 |
|
---|
// Reinstate the statistical analysis (undoes supressStats).
public void reinstateStats(){
    this.statFlag = true;
}
|
---|
1060 |
|
---|
1061 | // Reset window close option
|
---|
1062 | // argument = 1: closing plot window also terminates the program
|
---|
1063 | // argument = 2: closing plot window leaves program running
|
---|
1064 | public void setCloseChoice(int closeChoice){
|
---|
1065 | switch(closeChoice){
|
---|
1066 | case 1: this.plotWindowCloseChoice = false;
|
---|
1067 | break;
|
---|
1068 | case 2: this.plotWindowCloseChoice = true;
|
---|
1069 | break;
|
---|
1070 | default: throw new IllegalArgumentException("Option " + closeChoice + " not recognised");
|
---|
1071 | }
|
---|
1072 | }
|
---|
1073 |
|
---|
// Reset the ordinate scale factor option
// true - Ao is unknown, to be found by the regression procedure
// false - Ao set to unity
public void setYscaleOption(boolean flag){
    this.scaleFlag=flag;
    if(flag==false)this.yScaleFactor = 1.0D;
}
|
---|
1081 |
|
---|
// Reset the ordinate scale factor option
// true - Ao is unknown, to be found by the regression procedure
// false - Ao set to unity
// retained for backward compatibility (see setYscaleOption)
public void setYscale(boolean flag){
    this.scaleFlag=flag;
    if(flag==false)this.yScaleFactor = 1.0D;
}
|
---|
1090 |
|
---|
// Fix the ordinate scale factor Ao to the given value
// (also turns off fitting of Ao: scaleFlag is set false).
public void setYscaleFactor(double scale){
    this.scaleFlag=false;
    this.yScaleFactor = scale;
}
|
---|
1098 |
|
---|
// Get the ordinate scale factor option
// true - Ao is unknown (fitted)
// false - Ao set to unity or a fixed value
public boolean getYscaleOption(){
    return this.scaleFlag;
}
|
---|
1105 |
|
---|
// Get the ordinate scale factor option
// true - Ao is unknown (fitted)
// false - Ao set to unity or a fixed value
// retained to ensure backward compatibility (see getYscaleOption)
public boolean getYscale(){
    return this.scaleFlag;
}
|
---|
1113 |
|
---|
// Reset the true frequency test, trueFreq
// true if yData values are true frequencies, e.g. in a fit to a Gaussian; false if not
// if true chiSquarePoisson (see above) is also calculated
public void setTrueFreq(boolean trFr){
    boolean trFrOld = this.trueFreq;    // previous setting, so unit weights can be restored on switch-off
    this.trueFreq = trFr;
    if(trFr){
        // replace the weights by sqrt(|y|) Poisson estimates
        boolean flag = setTrueFreqWeights(this.yData, this.weight);
        if(flag){
            this.trueFreq=true;
            this.weightOpt=true;
            // NOTE(review): weightFlag is not updated here, unlike the constructors and
            // enterData methods which set it to 1 in this situation — confirm whether intentional
        }
        else{
            this.trueFreq=false;
            this.weightOpt=false;
        }
    }
    else{
        if(trFrOld){
            // option switched off: restore unit (unweighted) weights
            for(int i=0; i<this.weight.length; i++){
                weight[i]=1.0D;
            }
            this.weightOpt=false;
        }
    }
}
|
---|
1140 |
|
---|
// Get the true frequency test setting, trueFreq.
public boolean getTrueFreq(){
    return this.trueFreq;
}
|
---|
1145 |
|
---|
// Reset the x axis legend used by the plot methods.
public void setXlegend(String legend){
    this.xLegend = legend;
    this.legendCheck=true;  // mark that a user legend has been supplied
}
|
---|
1151 |
|
---|
// Reset the y axis legend used by the plot methods.
public void setYlegend(String legend){
    this.yLegend = legend;
    this.legendCheck=true;  // mark that a user legend has been supplied
}
|
---|
1157 |
|
---|
// Set the graph title used by the plot methods.
public void setTitle(String title){
    this.graphTitle = title;
}
|
---|
1162 |
|
---|
1163 | // Fit to a constant
|
---|
1164 | // y = a
|
---|
1165 | public void constant(){
|
---|
1166 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
1167 | this.lastMethod = 46;
|
---|
1168 | this.linNonLin = true;
|
---|
1169 | this.nTerms = 1;
|
---|
1170 | this.degreesOfFreedom = this.nData - this.nTerms;
|
---|
1171 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
1172 | this.best = new double[this.nTerms];
|
---|
1173 | this.bestSd = new double[this.nTerms];
|
---|
1174 | this.tValues = new double[this.nTerms];
|
---|
1175 | this.pValues = new double[this.nTerms];
|
---|
1176 |
|
---|
1177 | this.best[0] = Stat.mean(this.yData, this.weight);
|
---|
1178 | this.bestSd[0] = Stat.standardDeviation(this.yData, this.weight);
|
---|
1179 | this.tValues[0] = this.best[0]/this.bestSd[0];
|
---|
1180 |
|
---|
1181 | double atv = Math.abs(this.tValues[0]);
|
---|
1182 | if(atv!=atv){
|
---|
1183 | this.pValues[0] = Double.NaN;
|
---|
1184 | }
|
---|
1185 | else{
|
---|
1186 | this.pValues[0] = 1.0 - Stat.studentTcdf(-atv, atv, this.degreesOfFreedom);
|
---|
1187 | }
|
---|
1188 |
|
---|
1189 | this.sumOfSquaresError = 0.0;
|
---|
1190 | this.chiSquare = 0.0;
|
---|
1191 | for(int i=0; i<this.nData; i++){
|
---|
1192 | this.yCalc[i] = best[0];
|
---|
1193 | this.residual[i] = this.yCalc[i] - this.yData[i];
|
---|
1194 | this.residualW[i] = this.residual[i]/this.weight[i];
|
---|
1195 | this.sumOfSquaresError += this.residual[i]*this.residual[i];
|
---|
1196 | this.chiSquare += this.residualW[i]*this.residualW[i];
|
---|
1197 | }
|
---|
1198 | this.reducedChiSquare = this.chiSquare/this.degreesOfFreedom;
|
---|
1199 | }
|
---|
1200 |
|
---|
1201 | // Fit to a constant
|
---|
1202 | // plus plot and output file
|
---|
1203 | // y = a
|
---|
1204 | // legends provided
|
---|
1205 | public void constantPlot(String xLegend, String yLegend){
|
---|
1206 | this.xLegend = xLegend;
|
---|
1207 | this.yLegend = yLegend;
|
---|
1208 | this.legendCheck = true;
|
---|
1209 | this.constant();
|
---|
1210 | if(!this.supressPrint)this.print();
|
---|
1211 | int flag = 0;
|
---|
1212 | if(this.xData.length<2)flag = this.plotXY();
|
---|
1213 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1214 | }
|
---|
1215 |
|
---|
1216 | // Fit to a constant
|
---|
1217 | // plus plot and output file
|
---|
1218 | // y = a
|
---|
1219 | // no legends provided
|
---|
1220 | public void constantPlot(){
|
---|
1221 | this.constant();
|
---|
1222 | if(!this.supressPrint)this.print();
|
---|
1223 | int flag = 0;
|
---|
1224 | if(this.xData.length<2)flag = this.plotXY();
|
---|
1225 | }
|
---|
1226 |
|
---|
1227 | // Multiple linear regression with intercept (including y = ax + b)
|
---|
1228 | // y = a + b.x1 + c.x2 + d.x3 + . . .
|
---|
1229 | public void linear(){
|
---|
1230 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
1231 | this.lastMethod = 0;
|
---|
1232 | this.linNonLin = true;
|
---|
1233 | this.nTerms = this.nXarrays+1;
|
---|
1234 | this.degreesOfFreedom = this.nData - this.nTerms;
|
---|
1235 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
1236 | double[][] aa = new double[this.nTerms][this.nData];
|
---|
1237 |
|
---|
1238 | for(int j=0; j<nData; j++)aa[0][j]=1.0D;
|
---|
1239 | for(int i=1; i<nTerms; i++){
|
---|
1240 | for(int j=0; j<nData; j++){
|
---|
1241 | aa[i][j]=this.xData[i-1][j];
|
---|
1242 | }
|
---|
1243 | }
|
---|
1244 | this.best = new double[this.nTerms];
|
---|
1245 | this.bestSd = new double[this.nTerms];
|
---|
1246 | this.tValues = new double[this.nTerms];
|
---|
1247 | this.pValues = new double[this.nTerms];
|
---|
1248 | this.generalLinear(aa);
|
---|
1249 | if(!this.ignoreDofFcheck)this.generalLinearStats(aa);
|
---|
1250 | }
|
---|
1251 |
|
---|
1252 | // Multiple linear regression with intercept (including y = ax + b)
|
---|
1253 | // plus plot and output file
|
---|
1254 | // y = a + b.x1 + c.x2 + d.x3 + . . .
|
---|
1255 | // legends provided
|
---|
1256 | public void linearPlot(String xLegend, String yLegend){
|
---|
1257 | this.xLegend = xLegend;
|
---|
1258 | this.yLegend = yLegend;
|
---|
1259 | this.legendCheck = true;
|
---|
1260 | this.linear();
|
---|
1261 | if(!this.supressPrint)this.print();
|
---|
1262 | int flag = 0;
|
---|
1263 | if(this.xData.length<2)flag = this.plotXY();
|
---|
1264 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1265 | }
|
---|
1266 |
|
---|
1267 | // Multiple linear regression with intercept (including y = ax + b)
|
---|
1268 | // plus plot and output file
|
---|
1269 | // y = a + b.x1 + c.x2 + d.x3 + . . .
|
---|
1270 | // no legends provided
|
---|
1271 | public void linearPlot(){
|
---|
1272 | this.linear();
|
---|
1273 | if(!this.supressPrint)this.print();
|
---|
1274 | int flag = 0;
|
---|
1275 | if(this.xData.length<2)flag = this.plotXY();
|
---|
1276 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1277 | }
|
---|
1278 |
|
---|
// Multiple linear regression with intercept (including y = ax + b)
// y = a + b.x1 + c.x2 + d.x3 + . . .
// a fixed (argument: intercept)
public void linear(double intercept){
    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.lastMethod = 47;
    this.fixedInterceptL = intercept;
    this.linNonLin = true;
    this.nTerms = this.nXarrays;    // no intercept term fitted - the intercept is fixed
    this.degreesOfFreedom = this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
    double[][] aa = new double[this.nTerms][this.nData];

    // subtract the fixed intercept from the y data in place; restored after the fit (below)
    for(int j=0; j<nData; j++)this.yData[j] -= intercept;
    // design matrix is just the x arrays (no column of ones)
    for(int i=0; i<nTerms; i++){
        for(int j=0; j<nData; j++){
            aa[i][j]=this.xData[i][j];
        }
    }
    this.best = new double[this.nTerms];
    this.bestSd = new double[this.nTerms];
    this.tValues = new double[this.nTerms];
    this.pValues = new double[this.nTerms];
    this.generalLinear(aa);
    if(!this.ignoreDofFcheck)this.generalLinearStats(aa);
    // restore the y data and shift the calculated values back by the fixed intercept
    for(int j=0; j<nData; j++){
        this.yData[j] += intercept;
        this.yCalc[j] += intercept;
    }

}
|
---|
1310 |
|
---|
1311 | // Multiple linear regression with intercept (including y = ax + b)
|
---|
1312 | // plus plot and output file
|
---|
1313 | // y = a + b.x1 + c.x2 + d.x3 + . . .
|
---|
1314 | // a fixed (argument: intercept)
|
---|
1315 | // legends provided
|
---|
1316 | public void linearPlot(double intercept, String xLegend, String yLegend){
|
---|
1317 | this.xLegend = xLegend;
|
---|
1318 | this.yLegend = yLegend;
|
---|
1319 | this.legendCheck = true;
|
---|
1320 | this.linear(intercept);
|
---|
1321 | if(!this.supressPrint)this.print();
|
---|
1322 | int flag = 0;
|
---|
1323 | if(this.xData.length<2)flag = this.plotXY();
|
---|
1324 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1325 | }
|
---|
1326 |
|
---|
1327 | // Multiple linear regression with intercept (including y = ax + b)
|
---|
1328 | // plus plot and output file
|
---|
1329 | // y = a + b.x1 + c.x2 + d.x3 + . . .
|
---|
1330 | // a fixed (argument: intercept)
|
---|
1331 | // no legends provided
|
---|
1332 | public void linearPlot(double intercept){
|
---|
1333 | this.linear(intercept);
|
---|
1334 | if(!this.supressPrint)this.print();
|
---|
1335 | int flag = 0;
|
---|
1336 | if(this.xData.length<2)flag = this.plotXY();
|
---|
1337 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1338 | }
|
---|
1339 |
|
---|
1340 |
|
---|
1341 | // Polynomial fitting
|
---|
1342 | // y = a + b.x + c.x^2 + d.x^3 + . . .
|
---|
1343 | public void polynomial(int deg){
|
---|
1344 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
1345 | if(this.nXarrays>1)throw new IllegalArgumentException("This class will only perform a polynomial regression on a single x array");
|
---|
1346 | if(deg<1)throw new IllegalArgumentException("Polynomial degree must be greater than zero");
|
---|
1347 | this.lastMethod = 1;
|
---|
1348 | this.linNonLin = true;
|
---|
1349 | this.nTerms = deg+1;
|
---|
1350 | this.degreesOfFreedom = this.nData - this.nTerms;
|
---|
1351 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
1352 | double[][] aa = new double[this.nTerms][this.nData];
|
---|
1353 |
|
---|
1354 | for(int j=0; j<nData; j++)aa[0][j]=1.0D;
|
---|
1355 | for(int j=0; j<nData; j++)aa[1][j]=this.xData[0][j];
|
---|
1356 |
|
---|
1357 | for(int i=2; i<nTerms; i++){
|
---|
1358 | for(int j=0; j<nData; j++){
|
---|
1359 | aa[i][j]=Math.pow(this.xData[0][j],i);
|
---|
1360 | }
|
---|
1361 | }
|
---|
1362 | this.best = new double[this.nTerms];
|
---|
1363 | this.bestSd = new double[this.nTerms];
|
---|
1364 | this.tValues = new double[this.nTerms];
|
---|
1365 | this.pValues = new double[this.nTerms];
|
---|
1366 | this.generalLinear(aa);
|
---|
1367 | if(!this.ignoreDofFcheck)this.generalLinearStats(aa);
|
---|
1368 | }
|
---|
1369 |
|
---|
1370 | // Polynomial fitting plus plot and output file
|
---|
1371 | // y = a + b.x + c.x^2 + d.x^3 + . . .
|
---|
1372 | // legends provided
|
---|
1373 | public void polynomialPlot(int n, String xLegend, String yLegend){
|
---|
1374 | this.xLegend = xLegend;
|
---|
1375 | this.yLegend = yLegend;
|
---|
1376 | this.legendCheck = true;
|
---|
1377 | this.polynomial(n);
|
---|
1378 | if(!this.supressPrint)this.print();
|
---|
1379 | int flag = this.plotXY();
|
---|
1380 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1381 | }
|
---|
1382 |
|
---|
1383 | // Polynomial fitting plus plot and output file
|
---|
1384 | // y = a + b.x + c.x^2 + d.x^3 + . . .
|
---|
1385 | // No legends provided
|
---|
1386 | public void polynomialPlot(int n){
|
---|
1387 | this.polynomial(n);
|
---|
1388 | if(!this.supressPrint)this.print();
|
---|
1389 | int flag = this.plotXY();
|
---|
1390 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1391 | }
|
---|
1392 |
|
---|
1393 | // Polynomial fitting
|
---|
1394 | // y = a + b.x + c.x^2 + d.x^3 + . . .
|
---|
1395 | // a is fixed (argument: intercept)
|
---|
1396 | public void polynomial(int deg, double intercept){
|
---|
1397 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
1398 | if(this.nXarrays>1)throw new IllegalArgumentException("This class will only perform a polynomial regression on a single x array");
|
---|
1399 | if(deg<1)throw new IllegalArgumentException("Polynomial degree must be greater than zero");
|
---|
1400 | this.lastMethod = 48;
|
---|
1401 | this.fixedInterceptP = intercept;
|
---|
1402 | this.linNonLin = true;
|
---|
1403 | this.nTerms = deg;
|
---|
1404 | this.degreesOfFreedom = this.nData - this.nTerms;
|
---|
1405 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
1406 | double[][] aa = new double[this.nTerms][this.nData];
|
---|
1407 |
|
---|
1408 | for(int j=0; j<nData; j++)this.yData[j] -= intercept;
|
---|
1409 | for(int j=0; j<nData; j++)aa[0][j]=this.xData[0][j];
|
---|
1410 |
|
---|
1411 | for(int i=1; i<nTerms; i++){
|
---|
1412 | for(int j=0; j<nData; j++){
|
---|
1413 | aa[i][j]=Math.pow(this.xData[0][j],i+1);
|
---|
1414 | }
|
---|
1415 | }
|
---|
1416 | this.best = new double[this.nTerms];
|
---|
1417 | this.bestSd = new double[this.nTerms];
|
---|
1418 | this.tValues = new double[this.nTerms];
|
---|
1419 | this.pValues = new double[this.nTerms];
|
---|
1420 | this.generalLinear(aa);
|
---|
1421 | if(!this.ignoreDofFcheck)this.generalLinearStats(aa);
|
---|
1422 | for(int j=0; j<nData; j++){
|
---|
1423 | this.yData[j] += intercept;
|
---|
1424 | this.yCalc[j] += intercept;
|
---|
1425 | }
|
---|
1426 | }
|
---|
1427 |
|
---|
1428 |
|
---|
1429 | // Polynomial fitting plus plot and output file
|
---|
1430 | // y = a + b.x + c.x^2 + d.x^3 + . . .
|
---|
1431 | // a is fixed (argument: intercept)
|
---|
1432 | // legends provided
|
---|
1433 | public void polynomialPlot(int n, double intercept, String xLegend, String yLegend){
|
---|
1434 | this.xLegend = xLegend;
|
---|
1435 | this.yLegend = yLegend;
|
---|
1436 | this.legendCheck = true;
|
---|
1437 | this.polynomial(n, intercept);
|
---|
1438 | if(!this.supressPrint)this.print();
|
---|
1439 | int flag = this.plotXY();
|
---|
1440 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1441 | }
|
---|
1442 |
|
---|
1443 | // Polynomial fitting plus plot and output file
|
---|
1444 | // y = a + b.x + c.x^2 + d.x^3 + . . .
|
---|
1445 | // a is fixed (argument: intercept)
|
---|
1446 | // No legends provided
|
---|
1447 | public void polynomialPlot(int n, double intercept){
|
---|
1448 | this.polynomial(n, intercept);
|
---|
1449 | if(!this.supressPrint)this.print();
|
---|
1450 | int flag = this.plotXY();
|
---|
1451 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1452 | }
|
---|
1453 |
|
---|
1454 | // Best polynomial
|
---|
1455 | // Finds the best polynomial fit
|
---|
1456 | public ArrayList<Object> bestPolynomial(){
|
---|
1457 | return polynomialBest(0);
|
---|
1458 | }
|
---|
1459 |
|
---|
1460 | // Internal method finding the best polynomial fit
|
---|
// Internal method finding the best polynomial fit.
// flag = 0 -> fit only; flag = 1 -> fit plus plot and output file.
// Fits polynomials of increasing degree on a scratch Regression instance,
// compares successive degrees with an F-test on the additional term, picks a
// degree, then refits this object's data with that degree.
// Returns this.bestPolyArray: {best degree, number of fits, lower degrees,
// upper degrees, lower chi-squares, upper chi-squares, F-ratios, F-probabilities}.
public ArrayList<Object> polynomialBest(int flag){
    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    if(this.nXarrays>1)throw new IllegalArgumentException("This class will only perform a polynomial regression on a single x array");
    this.bestPolyFlag = true;
    this.linNonLin = true;

    ArrayList<Object> array0 = null;                     // {F-ratio, F-probability} for one degree comparison
    ArrayList<Object> array1 = new ArrayList<Object>();  // accumulated chi-square / F-ratio / F-probability triplets

    int nAttempts = this.nData-3;                        // highest degree that will be attempted
    // Scratch instance so this object's state is untouched during the search
    Regression reg = new Regression(xData[0], yData);
    // Degree 0: constant fit
    reg.constant();
    double chi = reg.getChiSquare();
    double chiMin = chi;                                 // smallest chi-square seen so far
    double chiLast = chi;                                // chi-square of the previous degree
    int chiMinIndex = 0;                                 // degree giving chiMin
    array1.add(new Double(chi));
    // Degree 1: straight line, F-tested against the constant fit
    reg.linear();
    chi = reg.getChiSquare();
    array0 = Regression.testOfAdditionalTerms_ArrayList(chiLast, 0, chi, 1, nData);
    double fRatio = ((Double)array0.get(0)).doubleValue();
    double fProb = ((Double)array0.get(1)).doubleValue();
    array1.add(new Double(chi));
    array1.add(new Double(fRatio));
    array1.add(new Double(fProb));
    if(chiMin>=chi){
        chiMin = chi;
        chiMinIndex = 1;
    }
    // Raise the degree until chi-square stops improving, then try two more degrees
    boolean test0 = true;
    int ii = 2;
    int iiEnd = nAttempts;
    boolean testEnd = true;
    while(test0){
        chiLast = chi;
        reg.polynomial(ii);
        chi = reg.getChiSquare();
        array0 = Regression.testOfAdditionalTerms_ArrayList(chiLast, ii-1, chi, ii, nData);
        fRatio = ((Double)array0.get(0)).doubleValue();
        fProb = ((Double)array0.get(1)).doubleValue();
        array1.add(new Double(chi));
        array1.add(new Double(fRatio));
        array1.add(new Double(fProb));
        if(chiMin>=chi){
            chiMin = chi;
            chiMinIndex = ii;
        }
        // First non-improvement: extend the search by two degrees then stop
        if(chi>=chiLast && testEnd){
            iiEnd = ii + 2;
            testEnd = false;
            if(iiEnd>nAttempts)iiEnd = nAttempts;
        }
        ii++;
        if(ii>iiEnd)test0 = false;
    }

    // Unpack the accumulated statistics into arrays indexed by degree
    double[] chia = new double[iiEnd+1];
    double[] fRatioa = new double[iiEnd+1];   // index 0 never filled (no F-test for the constant fit)
    double[] fProba = new double[iiEnd+1];    // index 0 never filled; stays 0.0
    int ipoly = 0;                            // chosen best degree
    if(chiMin>0){
        chia[0] = ((Double)array1.get(0)).doubleValue();
        int jj = 1;
        for(int i=1; i<=iiEnd; i++){
            chia[i] = ((Double)array1.get(jj++)).doubleValue();
            fRatioa[i] = ((Double)array1.get(jj++)).doubleValue();
            fProba[i] = ((Double)array1.get(jj++)).doubleValue();
        }

        // Walk back from the minimum-chi-square degree towards degree 0,
        // stopping at the last degree whose F-probability exceeds the minimum
        test0 = true;
        ii = chiMinIndex;
        double fProbMin = fProba[ii];
        ii--;
        while(test0){
            if(fProba[ii]>fProbMin){
                test0 = false;
                ipoly = ii+1;
            }
            else{
                ii--;
                if(ii<0){
                    test0 = false;
                    ipoly = 0;
                }
            }
        }
    }
    this.bestPolynomialDegree = ipoly;

    // Repack ArrayList
    int[] deg0s = new int[iiEnd];
    int[] deg1s = new int[iiEnd];
    double[] chi0s = new double[iiEnd];
    double[] chi1s = new double[iiEnd];
    for(int i=0; i<iiEnd; i++){
        deg0s[i] = i;
        deg1s[i] = i+1;
        chi0s[i] = chia[i];
        chi1s[i] = chia[i+1];
    }

    this.bestPolyArray.clear();
    this.bestPolyArray.add(new Integer(this.bestPolynomialDegree));
    this.bestPolyArray.add(new Integer(iiEnd+1));
    this.bestPolyArray.add(deg0s);
    this.bestPolyArray.add(deg1s);
    this.bestPolyArray.add(chi0s);
    this.bestPolyArray.add(chi1s);
    this.bestPolyArray.add(fRatioa);
    this.bestPolyArray.add(fProba);


    // Refit this object's data with the chosen degree
    switch(flag){
        case 0: // No plot
            switch(this.bestPolynomialDegree){
                case 0: this.constant();
                        break;
                case 1: this.linear();
                        break;
                default: this.polynomial(this.bestPolynomialDegree);
            }
            break;
        case 1: // Plot
            switch(this.bestPolynomialDegree){
                case 0: this.constantPlot();
                        break;
                case 1: this.linearPlot();
                        break;
                default: this.polynomialPlot(this.bestPolynomialDegree);
            }
    }

    return this.bestPolyArray;

}
|
---|
1596 |
|
---|
1597 |
|
---|
1598 | // Best polynomial
|
---|
1599 | // Finds the best polynomial fit
|
---|
1600 | // plus plot and output file
|
---|
1601 | // Legends provided
|
---|
1602 | public ArrayList<Object> bestPolynomialPlot(String xLegend, String yLegend){
|
---|
1603 | this.xLegend = xLegend;
|
---|
1604 | this.yLegend = yLegend;
|
---|
1605 | this.legendCheck = true;
|
---|
1606 | return this.polynomialBest(1);
|
---|
1607 | }
|
---|
1608 |
|
---|
1609 | // Best polynomial
|
---|
1610 | // Finds the best polynomial fit
|
---|
1611 | // plus plot and output file
|
---|
1612 | // No legends provided
|
---|
1613 | public ArrayList<Object> bestPolynomialPlot(){
|
---|
1614 | return this.polynomialBest(1);
|
---|
1615 | }
|
---|
1616 |
|
---|
1617 | // Best polynomial
|
---|
1618 | // Finds the best polynomial fit
|
---|
1619 | // Fixed intercept
|
---|
1620 | public ArrayList<Object> bestPolynomial(double fixedIntercept){
|
---|
1621 | this.fixedInterceptP = fixedIntercept;
|
---|
1622 | return polynomialBest(fixedIntercept, 0);
|
---|
1623 | }
|
---|
1624 |
|
---|
1625 |
|
---|
1626 | // Internal method finding the best polynomial fit
|
---|
// Internal method finding the best polynomial fit with a fixed intercept.
// flag = 0 -> fit only; flag = 1 -> fit plus plot and output file.
// Same strategy as polynomialBest(int) but the search starts at degree 1
// (the intercept is fixed so there is no constant fit) and all fits use
// polynomial(degree, fixedIntercept).
// Returns this.bestPolyArray: {best degree, number of fits, lower degrees,
// upper degrees, lower chi-squares, upper chi-squares, F-ratios, F-probabilities}.
public ArrayList<Object> polynomialBest(double fixedIntercept, int flag){
    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    if(this.nXarrays>1)throw new IllegalArgumentException("This class will only perform a polynomial regression on a single x array");
    this.bestPolyFlag = true;
    this.linNonLin = true;

    ArrayList<Object> array0 = null;                     // {F-ratio, F-probability} for one degree comparison
    ArrayList<Object> array1 = new ArrayList<Object>();  // accumulated chi-square / F-ratio / F-probability triplets

    int nAttempts = this.nData-3;                        // highest degree that will be attempted
    // Scratch instance so this object's state is untouched during the search
    Regression reg = new Regression(xData[0], yData);
    // Degree 1 fit (lowest possible with a fixed intercept)
    reg.polynomial(1, fixedIntercept);
    double chi = reg.getChiSquare();
    double chiMin = chi;                                 // smallest chi-square seen so far
    double chiLast = chi;                                // chi-square of the previous degree
    int chiMinIndex = 1;                                 // degree giving chiMin
    array1.add(new Double(chi));
    // Degree 2 fit, F-tested against degree 1
    reg.polynomial(2, fixedIntercept);
    chi = reg.getChiSquare();
    array0 = Regression.testOfAdditionalTerms_ArrayList(chiLast, 0, chi, 1, nData);
    double fRatio = ((Double)array0.get(0)).doubleValue();
    double fProb = ((Double)array0.get(1)).doubleValue();
    array1.add(new Double(chi));
    array1.add(new Double(fRatio));
    array1.add(new Double(fProb));
    if(chiMin>=chi){
        chiMin = chi;
        chiMinIndex = 2;
    }
    // Raise the degree until chi-square stops improving, then try two more degrees
    boolean test0 = true;
    int ii = 3;
    int iiEnd = nAttempts;
    boolean testEnd = true;
    while(test0){
        chiLast = chi;
        reg.polynomial(ii, fixedIntercept);
        chi = reg.getChiSquare();
        array0 = Regression.testOfAdditionalTerms_ArrayList(chiLast, ii-1, chi, ii, nData);
        fRatio = ((Double)array0.get(0)).doubleValue();
        fProb = ((Double)array0.get(1)).doubleValue();
        array1.add(new Double(chi));
        array1.add(new Double(fRatio));
        array1.add(new Double(fProb));

        if(chiMin>=chi){
            chiMin = chi;
            chiMinIndex = ii;
        }
        // First non-improvement: extend the search by two degrees then stop
        if(chi>=chiLast && testEnd){
            iiEnd = ii + 2;
            testEnd = false;
            if(iiEnd>nAttempts)iiEnd = nAttempts;
        }
        ii++;
        if(ii>iiEnd)test0 = false;
    }


    // Unpack the accumulated statistics; element i refers to degree i+1
    double[] chia = new double[iiEnd];
    double[] fRatioa = new double[iiEnd];   // index 0 never filled (degree 1 has no F-test)
    double[] fProba = new double[iiEnd];    // index 0 never filled; stays 0.0
    int ipoly = 0;                          // chosen best degree
    if(chiMin>0){
        chia[0] = ((Double)array1.get(0)).doubleValue();
        int jj = 1;
        for(int i=1; i<iiEnd; i++){
            chia[i] = ((Double)array1.get(jj++)).doubleValue();
            fRatioa[i] = ((Double)array1.get(jj++)).doubleValue();
            fProba[i] = ((Double)array1.get(jj++)).doubleValue();
        }

        // Walk back from the minimum-chi-square degree towards the lowest degree
        // NOTE(review): fProbMin is read at fProba[ii-1] whereas the sibling
        // polynomialBest(int) uses fProba[ii] — confirm the off-by-one is intended
        test0 = true;
        ii = chiMinIndex;
        double fProbMin = fProba[ii-1];
        ii--;
        while(test0){
            if(fProba[ii]>fProbMin){
                test0 = false;
                ipoly = ii+2;
            }
            else{
                ii--;
                if(ii<0){
                    test0 = false;
                    // NOTE(review): ipoly = 0 makes the switch below call
                    // polynomial(0, fixedIntercept), which throws (deg<1) — verify
                    ipoly = 0;
                }
            }
        }
    }
    this.bestPolynomialDegree = ipoly;

    // Repack ArrayList
    int[] deg0s = new int[iiEnd-1];
    int[] deg1s = new int[iiEnd-1];
    double[] chi0s = new double[iiEnd-1];
    double[] chi1s = new double[iiEnd-1];
    for(int i=0; i<iiEnd-1; i++){
        deg0s[i] = i;
        deg1s[i] = i+1;
        chi0s[i] = chia[i];
        chi1s[i] = chia[i+1];
    }

    this.bestPolyArray.clear();
    this.bestPolyArray.add(new Integer(this.bestPolynomialDegree));
    this.bestPolyArray.add(new Integer(iiEnd));
    this.bestPolyArray.add(deg0s);
    this.bestPolyArray.add(deg1s);
    this.bestPolyArray.add(chi0s);
    this.bestPolyArray.add(chi1s);
    this.bestPolyArray.add(fRatioa);
    this.bestPolyArray.add(fProba);


    // Refit this object's data with the chosen degree
    switch(flag){
        case 0: // No plot
            this.polynomial(this.bestPolynomialDegree, fixedIntercept);
            break;
        case 1: // Plot
            this.polynomialPlot(this.bestPolynomialDegree, fixedIntercept);
    }

    return this.bestPolyArray;

}
|
---|
1752 |
|
---|
1753 | // Best polynomial
|
---|
1754 | // Finds the best polynomial fit with a fixed intercept
|
---|
1755 | // plus plot and output file
|
---|
1756 | // Legends provided
|
---|
1757 | public ArrayList<Object> bestPolynomialPlot(double fixedIntercept, String xLegend, String yLegend){
|
---|
1758 | this.xLegend = xLegend;
|
---|
1759 | this.yLegend = yLegend;
|
---|
1760 | this.legendCheck = true;
|
---|
1761 | return this.polynomialBest(fixedIntercept, 1);
|
---|
1762 | }
|
---|
1763 |
|
---|
1764 | // Best polynomial
|
---|
1765 | // Finds the best polynomial fit
|
---|
1766 | // plus plot and output file
|
---|
1767 | // No legends provided
|
---|
1768 | public ArrayList<Object> bestPolynomialPlot(double fixedIntercept){
|
---|
1769 | return this.polynomialBest(fixedIntercept, 1);
|
---|
1770 | }
|
---|
1771 |
|
---|
1772 | // Set significance level used in bestPolynomial F-test
|
---|
1773 | public void setFtestSignificance(double signif){
|
---|
1774 | this.fProbSignificance = signif;
|
---|
1775 | }
|
---|
1776 |
|
---|
1777 | // get significance level used in bestPolynomial F-test
|
---|
1778 | public double getFtestSignificance(double signif){
|
---|
1779 | return this.fProbSignificance;
|
---|
1780 | }
|
---|
1781 |
|
---|
1782 | // Method for fitting data to a non-integer polynomial
|
---|
1783 | // y = a + b.x + c.x^d
|
---|
1784 | // No plotting
|
---|
1785 | public void nonIntegerPolynomial(){
|
---|
1786 | this.fitNonIntegerPolynomial(0);
|
---|
1787 | }
|
---|
1788 |
|
---|
1789 | // Method for fitting data to a non-integer polynomial
|
---|
1790 | // y = a + b.x + c.x^d
|
---|
1791 | // with plotting
|
---|
1792 | public void nonIntegerPolynomialPlot(){
|
---|
1793 | this.fitNonIntegerPolynomial(1);
|
---|
1794 | }
|
---|
1795 |
|
---|
1796 | // Method for fitting data to a non-integer polynomial
|
---|
1797 | // y = a + b.x + c.x^d
|
---|
1798 | // with plotting and user supplied legends
|
---|
1799 | public void nonIntegerPolynomialPlot(String xLegend, String yLegend){
|
---|
1800 | this.xLegend = xLegend;
|
---|
1801 | this.yLegend = yLegend;
|
---|
1802 | this.legendCheck = true;
|
---|
1803 | this.fitNonIntegerPolynomial(1);
|
---|
1804 | }
|
---|
1805 |
|
---|
1806 | // Internal method for fitting data to a non-integer polynomial
|
---|
1807 | // y = a + b.x + c.x^d
|
---|
1808 | protected void fitNonIntegerPolynomial(int plotFlag){
|
---|
1809 |
|
---|
1810 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
1811 | this.lastMethod = 50;
|
---|
1812 | this.userSupplied = false;
|
---|
1813 | this.linNonLin = false;
|
---|
1814 | this.zeroCheck = false;
|
---|
1815 | this.nTerms = 4;
|
---|
1816 | this.degreesOfFreedom = this.nData - this.nTerms;
|
---|
1817 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
1818 |
|
---|
1819 | // order data into ascending order of the abscissae
|
---|
1820 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
1821 |
|
---|
1822 | // Estimate of parameters
|
---|
1823 | Regression reg = new Regression(this.xData[0], this.yData, this.weight);
|
---|
1824 |
|
---|
1825 | reg.polynomial(2);
|
---|
1826 |
|
---|
1827 | double[] start = new double[4];
|
---|
1828 | double[] step = new double[4];
|
---|
1829 |
|
---|
1830 | double[] best = reg.getBestEstimates();
|
---|
1831 | for(int i=0; i<3; i++){
|
---|
1832 | start[i] = best[i];
|
---|
1833 | step[i] = start[i]*0.1;
|
---|
1834 | if(step[i]==0.0)step[i] = 0.1;
|
---|
1835 | }
|
---|
1836 | start[3] = 3.0;
|
---|
1837 | step[3] = 0.6;
|
---|
1838 |
|
---|
1839 | // Nelder and Mead Simplex Regression
|
---|
1840 | NonIntegerPolyFunction f = new NonIntegerPolyFunction();
|
---|
1841 | Object regFun = (Object)f;
|
---|
1842 | this.nelderMead(regFun, start, step, this.fTol, this.nMax);
|
---|
1843 |
|
---|
1844 | if(plotFlag==1){
|
---|
1845 | // Print results
|
---|
1846 | if(!this.supressPrint)this.print();
|
---|
1847 |
|
---|
1848 | // Plot results
|
---|
1849 | int flag = this.plotXY(f);
|
---|
1850 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
1851 | }
|
---|
1852 | }
|
---|
1853 |
|
---|
1854 | // Generalised linear regression
|
---|
1855 | // y = a.f1(x) + b.f2(x) + c.f3(x) + . . .
|
---|
1856 | public void linearGeneral(){
|
---|
1857 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
1858 | this.lastMethod = 2;
|
---|
1859 |
|
---|
1860 | this.linNonLin = true;
|
---|
1861 | this.nTerms = this.nXarrays;
|
---|
1862 | this.degreesOfFreedom = this.nData - this.nTerms;
|
---|
1863 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
1864 | this.best = new double[this.nTerms];
|
---|
1865 | this.bestSd = new double[this.nTerms];
|
---|
1866 | this.tValues = new double[this.nTerms];
|
---|
1867 | this.pValues = new double[this.nTerms];
|
---|
1868 | this.generalLinear(this.xData);
|
---|
1869 | if(!this.ignoreDofFcheck)this.generalLinearStats(this.xData);
|
---|
1870 | }
|
---|
1871 |
|
---|
1872 | // Generalised linear regression plus plot and output file
|
---|
1873 | // y = a.f1(x) + b.f2(x) + c.f3(x) + . . .
|
---|
1874 | // legends provided
|
---|
1875 | public void linearGeneralPlot(String xLegend, String yLegend){
|
---|
1876 | this.xLegend = xLegend;
|
---|
1877 | this.yLegend = yLegend;
|
---|
1878 | this.legendCheck = true;
|
---|
1879 | this.linearGeneral();
|
---|
1880 | if(!this.supressPrint)this.print();
|
---|
1881 | if(!this.supressYYplot)this.plotYY();
|
---|
1882 | }
|
---|
1883 |
|
---|
1884 | // Generalised linear regression plus plot and output file
|
---|
1885 | // y = a.f1(x) + b.f2(x) + c.f3(x) + . . .
|
---|
1886 | // No legends provided
|
---|
1887 | public void linearGeneralPlot(){
|
---|
1888 | this.linearGeneral();
|
---|
1889 | if(!this.supressPrint)this.print();
|
---|
1890 | if(!this.supressYYplot)this.plotYY();
|
---|
1891 | }
|
---|
1892 |
|
---|
1893 | // Generalised linear regression (protected method called by linear(), linearGeneral() and polynomial())
|
---|
1894 | protected void generalLinear(double[][] xd){
|
---|
1895 | if(this.nData<=this.nTerms && !this.ignoreDofFcheck)throw new IllegalArgumentException("Number of unknown parameters is greater than or equal to the number of data points");
|
---|
1896 | double sde=0.0D, sum=0.0D, yCalctemp=0.0D;
|
---|
1897 | double[][] a = new double[this.nTerms][this.nTerms];
|
---|
1898 | double[][] h = new double[this.nTerms][this.nTerms];
|
---|
1899 | double[]b = new double[this.nTerms];
|
---|
1900 | double[]coeff = new double[this.nTerms];
|
---|
1901 |
|
---|
1902 | // set statistic arrays to NaN if df check ignored
|
---|
1903 | if(this.ignoreDofFcheck){
|
---|
1904 | this.bestSd = new double[this.nTerms];
|
---|
1905 | this.pseudoSd = new double[this.nTerms];
|
---|
1906 | this.tValues = new double[this.nTerms];
|
---|
1907 | this.pValues = new double[this.nTerms];
|
---|
1908 |
|
---|
1909 | this.covar = new double[this.nTerms][this.nTerms];
|
---|
1910 | this.corrCoeff = new double[this.nTerms][this.nTerms];;
|
---|
1911 | for(int i=0; i<this.nTerms; i++){
|
---|
1912 | this.bestSd[i] = Double.NaN;
|
---|
1913 | this.pseudoSd[i] = Double.NaN;
|
---|
1914 | for(int j=0; j<this.nTerms; j++){
|
---|
1915 | this.covar[i][j] = Double.NaN;
|
---|
1916 | this.corrCoeff[i][j] = Double.NaN;
|
---|
1917 | }
|
---|
1918 | }
|
---|
1919 | }
|
---|
1920 |
|
---|
1921 | for (int i=0; i<nTerms; ++i){
|
---|
1922 | sum=0.0D ;
|
---|
1923 | for (int j=0; j<nData; ++j){
|
---|
1924 | sum += this.yData[j]*xd[i][j]/Fmath.square(this.weight[j]);
|
---|
1925 | }
|
---|
1926 | b[i]=sum;
|
---|
1927 | }
|
---|
1928 | for (int i=0; i<nTerms; ++i){
|
---|
1929 | for (int j=0; j<nTerms; ++j){
|
---|
1930 | sum=0.0;
|
---|
1931 | for (int k=0; k<nData; ++k){
|
---|
1932 | sum += xd[i][k]*xd[j][k]/Fmath.square(this.weight[k]);
|
---|
1933 | }
|
---|
1934 | a[j][i]=sum;
|
---|
1935 | }
|
---|
1936 | }
|
---|
1937 | Matrix aa = new Matrix(a);
|
---|
1938 | if(this.supressErrorMessages)aa.supressErrorMessage();
|
---|
1939 | coeff = aa.solveLinearSet(b);
|
---|
1940 |
|
---|
1941 | for(int i=0; i<this.nTerms; i++){
|
---|
1942 | this.best[i] = coeff[i];
|
---|
1943 | }
|
---|
1944 | }
|
---|
1945 |
|
---|
1946 | // Generalised linear regression statistics (protected method called by linear(), linearGeneral() and polynomial())
|
---|
1947 | protected void generalLinearStats(double[][] xd){
|
---|
1948 |
|
---|
1949 | double sde=0.0D, sum=0.0D, yCalctemp=0.0D;
|
---|
1950 | double[][] a = new double[this.nTerms][this.nTerms];
|
---|
1951 | double[][] h = new double[this.nTerms][this.nTerms];
|
---|
1952 | double[][] stat = new double[this.nTerms][this.nTerms];
|
---|
1953 | double[][] cov = new double[this.nTerms][this.nTerms];
|
---|
1954 | this.covar = new double[this.nTerms][this.nTerms];
|
---|
1955 | this.corrCoeff = new double[this.nTerms][this.nTerms];
|
---|
1956 | double[]coeffSd = new double[this.nTerms];
|
---|
1957 | double[]coeff = new double[this.nTerms];
|
---|
1958 |
|
---|
1959 | for(int i=0; i<this.nTerms; i++){
|
---|
1960 | coeff[i] = this.best[i];
|
---|
1961 | }
|
---|
1962 |
|
---|
1963 | this.chiSquare=0.0D;
|
---|
1964 | this.sumOfSquaresError=0.0D;
|
---|
1965 | for (int i=0; i< nData; ++i){
|
---|
1966 | yCalctemp=0.0;
|
---|
1967 | for (int j=0; j<nTerms; ++j){
|
---|
1968 | yCalctemp += coeff[j]*xd[j][i];
|
---|
1969 | }
|
---|
1970 | this.yCalc[i] = yCalctemp;
|
---|
1971 | yCalctemp -= this.yData[i];
|
---|
1972 | this.residual[i]=yCalctemp;
|
---|
1973 | this.residualW[i]=yCalctemp/weight[i];
|
---|
1974 | this.chiSquare += Fmath.square(yCalctemp/this.weight[i]);
|
---|
1975 | this.sumOfSquaresError += Fmath.square(yCalctemp);
|
---|
1976 | }
|
---|
1977 | this.reducedChiSquare = this.chiSquare/(this.degreesOfFreedom);
|
---|
1978 | double varY = this.sumOfSquaresError/(this.degreesOfFreedom);
|
---|
1979 | double sdY = Math.sqrt(varY);
|
---|
1980 |
|
---|
1981 | if(this.sumOfSquaresError==0.0D){
|
---|
1982 | for(int i=0; i<this.nTerms;i++){
|
---|
1983 | coeffSd[i]=0.0D;
|
---|
1984 | for(int j=0; j<this.nTerms;j++){
|
---|
1985 | this.covar[i][j]=0.0D;
|
---|
1986 | if(i==j){
|
---|
1987 | this.corrCoeff[i][j]=1.0D;
|
---|
1988 | }
|
---|
1989 | else{
|
---|
1990 | this.corrCoeff[i][j]=0.0D;
|
---|
1991 | }
|
---|
1992 | }
|
---|
1993 | }
|
---|
1994 | }
|
---|
1995 | else{
|
---|
1996 | for (int i=0; i<this.nTerms; ++i){
|
---|
1997 | for (int j=0; j<this.nTerms; ++j){
|
---|
1998 | sum=0.0;
|
---|
1999 | for (int k=0; k<this.nData; ++k){
|
---|
2000 | if (weightOpt){
|
---|
2001 | sde = weight[k];
|
---|
2002 | }
|
---|
2003 | else{
|
---|
2004 | sde = sdY;
|
---|
2005 | }
|
---|
2006 | sum += xd[i][k]*xd[j][k]/Fmath.square(sde);
|
---|
2007 | }
|
---|
2008 | h[j][i]=sum;
|
---|
2009 | }
|
---|
2010 | }
|
---|
2011 | Matrix hh = new Matrix(h);
|
---|
2012 | if(this.supressErrorMessages)hh.supressErrorMessage();
|
---|
2013 | hh = hh.inverse();
|
---|
2014 | stat = hh.getArrayCopy();
|
---|
2015 | for (int j=0; j<nTerms; ++j){
|
---|
2016 | coeffSd[j] = Math.sqrt(stat[j][j]);
|
---|
2017 | }
|
---|
2018 |
|
---|
2019 | for(int i=0; i<this.nTerms;i++){
|
---|
2020 | for(int j=0; j<this.nTerms;j++){
|
---|
2021 | this.covar[i][j]=stat[i][j];
|
---|
2022 | }
|
---|
2023 | }
|
---|
2024 |
|
---|
2025 | for(int i=0; i<this.nTerms;i++){
|
---|
2026 | for(int j=0; j<this.nTerms;j++){
|
---|
2027 | if(i==j){
|
---|
2028 | this.corrCoeff[i][j] = 1.0D;
|
---|
2029 | }
|
---|
2030 | else{
|
---|
2031 | this.corrCoeff[i][j]=covar[i][j]/(coeffSd[i]*coeffSd[j]);
|
---|
2032 | }
|
---|
2033 | }
|
---|
2034 | }
|
---|
2035 | }
|
---|
2036 |
|
---|
2037 | for(int i=0; i<this.nTerms; i++){
|
---|
2038 | this.bestSd[i] = coeffSd[i];
|
---|
2039 | this.tValues[i] = this.best[i]/this.bestSd[i];
|
---|
2040 | double atv = Math.abs(this.tValues[i]);
|
---|
2041 | if(atv!=atv){
|
---|
2042 | this.pValues[i] = Double.NaN;
|
---|
2043 | }
|
---|
2044 | else{
|
---|
2045 | this.pValues[i] = 1.0 - Stat.studentTcdf(-atv, atv, this.degreesOfFreedom);
|
---|
2046 | }
|
---|
2047 | }
|
---|
2048 |
|
---|
2049 | // Linear correlation coefficient
|
---|
2050 | if(this.nXarrays==1 && this.nYarrays==1){
|
---|
2051 | this.xyR = Stat.corrCoeff(this.xData[0], this.yData, this.weight);
|
---|
2052 | }
|
---|
2053 | this.yyR = Stat.corrCoeff(this.yCalc, this.yData, this.weight);
|
---|
2054 |
|
---|
2055 | // Coefficient of determination
|
---|
2056 | this.yMean = Stat.mean(this.yData);
|
---|
2057 | this.yWeightedMean = Stat.mean(this.yData, this.weight);
|
---|
2058 |
|
---|
2059 | this.sumOfSquaresTotal = 0.0;
|
---|
2060 | for(int i=0; i<this.nData; i++){
|
---|
2061 | this.sumOfSquaresTotal += Fmath.square((this.yData[i] - this.yWeightedMean)/weight[i]);
|
---|
2062 | }
|
---|
2063 |
|
---|
2064 | this.sumOfSquaresRegrn = this.sumOfSquaresTotal - this.chiSquare;
|
---|
2065 | if(this.sumOfSquaresRegrn<0.0)this.sumOfSquaresRegrn=0.0;
|
---|
2066 |
|
---|
2067 | this.multR = this.sumOfSquaresRegrn/this.sumOfSquaresTotal;
|
---|
2068 |
|
---|
2069 | // Calculate adjusted multiple coefficient of determination
|
---|
2070 | this.adjustedR = 1.0 - (1.0 - multR)*(this.nData - 1 )/(this.nData - this.nTerms - 1);
|
---|
2071 |
|
---|
2072 | // F-ratio
|
---|
2073 | this.multipleF = multR*(this.nData-this.nTerms-1.0)/((1.0D-this.multR)*this.nTerms);
|
---|
2074 | if(this.multipleF>=0.0)this.multipleFprob = Stat.fTestProb(this.multipleF, this.nXarrays, this.nData-this.nTerms-1);
|
---|
2075 |
|
---|
2076 | }
|
---|
2077 |
|
---|
2078 |
|
---|
2079 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
// Nelder and Mead simplex non-linear regression engine.
//
// Minimises the objective returned by this.sumSquares(regFun, p) (sum of squares for
// unweighted data, chi-square for weighted data — determined elsewhere) over the
// np = start.length parameters, using the classic reflection / extension / contraction
// simplex moves with optional restarts.
//
// regFun - object implementing the user's regression function (cast inside sumSquares)
// start  - initial parameter estimates; NOTE: mutated in place when scaling is applied
// step   - initial step sizes used to build the starting simplex; also mutated by scaling
// fTol   - convergence tolerance, interpreted according to this.minTest (see switch below)
// nMax   - maximum number of iterations
//
// Results are deposited in instance fields: this.best (unscaled best estimates),
// this.fMin (objective at minimum), this.nIter, this.kRestart, this.simplexSd,
// this.sumOfSquaresError, this.nlrStatus (false if nMax reached without convergence).
protected void nelderMead(Object regFun, double[] start, double[] step, double fTol, int nMax){
    int np = start.length;  // number of unknown parameters;
    if(this.maxConstraintIndex>=np)throw new IllegalArgumentException("You have entered more constrained parameters ("+this.maxConstraintIndex+") than minimisation parameters (" + np + ")");
    this.nlrStatus = true;  // -> false if convergence criterion not met
    this.nTerms = np;       // number of parameters whose best estimates are to be determined
    int nnp = np+1;         // number of simplex apices
    this.lastSSnoConstraint=0.0D;   // last sum of squares without a penalty constraint being applied

    if(this.scaleOpt<2)this.scale = new double[np];  // scaling factors
    if(scaleOpt==2 && scale.length!=start.length)throw new IllegalArgumentException("scale array and initial estimate array are of different lengths");
    // NOTE(review): the message below reads as truncated — presumably "... are of different lengths"
    if(step.length!=start.length)throw new IllegalArgumentException("step array length " + step.length + " and initial estimate array length " + start.length + " are of different");

    // check for zero step sizes (a zero step would collapse one simplex dimension)
    for(int i=0; i<np; i++)if(step[i]==0.0D)throw new IllegalArgumentException("step " + i+ " size is zero");

    // set statistic arrays to NaN if degrees of freedom check ignored
    if(this.ignoreDofFcheck){
        this.bestSd = new double[this.nTerms];
        this.pseudoSd = new double[this.nTerms];
        this.tValues = new double[this.nTerms];
        this.pValues = new double[this.nTerms];

        this.covar = new double[this.nTerms][this.nTerms];
        this.corrCoeff = new double[this.nTerms][this.nTerms];;
        for(int i=0; i<this.nTerms; i++){
            this.bestSd[i] = Double.NaN;
            this.pseudoSd[i] = Double.NaN;
            for(int j=0; j<this.nTerms; j++){
                this.covar[i][j] = Double.NaN;
                this.corrCoeff[i][j] = Double.NaN;
            }
        }
    }

    // set up arrays
    this.startH = new double[np];   // holding array of unscaled initial start values
    this.stepH = new double[np];    // unscaled initial step values
    this.startSH = new double[np];  // holding array of scaled initial start values
    this.stepSH = new double[np];   // scaled initial step values
    double[]pmin = new double[np];  // Nelder and Mead Pmin
    this.best = new double[np];     // best estimates array
    this.bestSd = new double[np];   // sd of best estimates array
    this.tValues = new double[np];  // t-value of best estimates array
    this.pValues = new double[np];  // p-value of best estimates array

    // simplex work arrays (sized nnp = np+1; pbar/pstar/p2star only use the first np slots)
    double[][] pp = new double[nnp][nnp];   //Nelder and Mead P
    double[] yy = new double[nnp];          //Nelder and Mead y
    double[] pbar = new double[nnp];        //Nelder and Mead P with bar superscript
    double[] pstar = new double[nnp];       //Nelder and Mead P*
    double[] p2star = new double[nnp];      //Nelder and Mead P**

    // mean of absolute values of yData (for testing for minimum)
    double yabsmean=0.0D;
    for(int i=0; i<this.nData; i++)yabsmean += Math.abs(yData[i]);
    yabsmean /= this.nData;

    // Set any single parameter constraint parameters.
    // Layout of this.penalties (built elsewhere): index 1 holds the number of
    // constraints, then repeating triplets of (parameter index, check direction,
    // constraint value) starting at index 2.
    if(this.penalty){
        Integer itemp = (Integer)this.penalties.get(1);
        this.nConstraints = itemp.intValue();
        this.penaltyParam = new int[this.nConstraints];
        this.penaltyCheck = new int[this.nConstraints];
        this.constraints = new double[this.nConstraints];
        Double dtemp = null;
        int j=2;
        for(int i=0;i<this.nConstraints;i++){
            itemp = (Integer)this.penalties.get(j);
            this.penaltyParam[i] = itemp.intValue();
            j++;
            itemp = (Integer)this.penalties.get(j);
            this.penaltyCheck[i] = itemp.intValue();
            j++;
            dtemp = (Double)this.penalties.get(j);
            this.constraints[i] = dtemp.doubleValue();
            j++;
        }
    }

    // Set any multiple parameters constraint parameters.
    // Layout of this.sumPenalties mirrors this.penalties but each constraint is a
    // 5-tuple: (number of parameters, parameter index array, plus/minus weight array,
    // check direction, constraint value), starting at index 2.
    if(this.sumPenalty){
        Integer itemp = (Integer)this.sumPenalties.get(1);
        this.nSumConstraints = itemp.intValue();
        this.sumPenaltyParam = new int[this.nSumConstraints][];
        this.sumPlusOrMinus = new double[this.nSumConstraints][];
        this.sumPenaltyCheck = new int[this.nSumConstraints];
        this.sumPenaltyNumber = new int[this.nSumConstraints];
        this.sumConstraints = new double[this.nSumConstraints];
        int[] itempArray = null;
        double[] dtempArray = null;
        Double dtemp = null;
        int j=2;
        for(int i=0;i<this.nSumConstraints;i++){
            itemp = (Integer)this.sumPenalties.get(j);
            this.sumPenaltyNumber[i] = itemp.intValue();
            j++;
            itempArray = (int[])this.sumPenalties.get(j);
            this.sumPenaltyParam[i] = itempArray;
            j++;
            dtempArray = (double[])this.sumPenalties.get(j);
            this.sumPlusOrMinus[i] = dtempArray;
            j++;
            itemp = (Integer)this.sumPenalties.get(j);
            this.sumPenaltyCheck[i] = itemp.intValue();
            j++;
            dtemp = (Double)this.sumPenalties.get(j);
            this.sumConstraints[i] = dtemp.doubleValue();
            j++;
        }
    }

    // Store unscaled start and step values
    for(int i=0; i<np; i++){
        this.startH[i]=start[i];
        this.stepH[i]=step[i];
    }

    // scale initial estimates and step sizes; scaling divides each parameter by its
    // start value (option 1) so it cannot be applied if any start value is zero
    if(this.scaleOpt>0){
        boolean testzero=false;
        for(int i=0; i<np; i++)if(start[i]==0.0D)testzero=true;
        if(testzero){
            // NOTE(review): "Neler" is a typo for "Nelder" in the message below
            System.out.println("Neler and Mead Simplex: a start value of zero precludes scaling");
            System.out.println("Regression performed without scaling");
            this.scaleOpt=0;
        }
    }
    switch(this.scaleOpt){
        case 0: // No scaling carried out
            for(int i=0; i<np; i++)scale[i]=1.0D;
            break;
        case 1: // All parameters scaled to unity
            for(int i=0; i<np; i++){
                scale[i]=1.0/start[i];
                step[i]=step[i]/start[i];
                start[i]=1.0D;
            }
            break;
        case 2: // Each parameter scaled by a user provided factor
            for(int i=0; i<np; i++){
                step[i]*=scale[i];
                start[i]*= scale[i];
            }
            break;
        default: throw new IllegalArgumentException("Scaling factor option " + this.scaleOpt + " not recognised");
    }

    // set class member values
    this.fTol=fTol;
    this.nMax=nMax;
    this.nIter=0;
    for(int i=0; i<np; i++){
        this.startSH[i] = start[i];
        this.stepSH[i] = step[i];
        this.scale[i] = scale[i];
    }

    // initial simplex: seed the working points with the (scaled) start values
    double sho=0.0D;
    for (int i=0; i<np; ++i){
        sho=start[i];
        pstar[i]=sho;
        p2star[i]=sho;
        pmin[i]=sho;
    }

    int jcount=this.konvge;  // count of number of restarts still available

    // apex nnp-1 is the start point; apices 0..np-1 are start displaced by one step each
    for (int i=0; i<np; ++i){
        pp[i][nnp-1]=start[i];
    }
    yy[nnp-1]=this.sumSquares(regFun, start);
    for (int j=0; j<np; ++j){
        start[j]=start[j]+step[j];

        for (int i=0; i<np; ++i)pp[i][j]=start[i];
        yy[j]=this.sumSquares(regFun, start);
        start[j]=start[j]-step[j];
    }

    // loop over allowed number of iterations

    double ynewlo=0.0D;     // current value lowest y
    double ystar = 0.0D;    // Nelder and Mead y*
    double y2star = 0.0D;   // Nelder and Mead y**
    double ylo = 0.0D;      // Nelder and Mead y(low)
    double fMin;            // function value at minimum
                            // NOTE(review): this local is never assigned or read below;
                            // the result is stored in the field this.fMin instead

    int ilo=0;              // index of lowest apex
    int ihi=0;              // index of highest apex
    int ln=0;               // counter for a check on low and high apices
    boolean test = true;    // test becomes false on reaching minimum

    // variables used in calculating the variance of the simplex at a putative minimum
    double curMin = 00D;    // sd of the values at the simplex apices (00D == 0.0)
    double sumnm = 0.0D;    // for calculating the mean of the apical values
    double zn = 0.0D;       // for calculating the summation of their differences from the mean
    double summnm = 0.0D;   // for calculating the variance

    while(test){
        // Determine h: find the lowest (ilo) and highest (ihi) apices of the simplex
        ylo=yy[0];
        ynewlo=ylo;
        ilo=0;
        ihi=0;
        for (int i=1; i<nnp; ++i){
            if (yy[i]<ylo){
                ylo=yy[i];
                ilo=i;
            }
            if (yy[i]>ynewlo){
                ynewlo=yy[i];
                ihi=i;
            }
        }
        // Calculate pbar: centroid of all apices excluding the highest
        for (int i=0; i<np; ++i){
            zn=0.0D;
            for (int j=0; j<nnp; ++j){
                zn += pp[i][j];
            }
            zn -= pp[i][ihi];
            pbar[i] = zn/np;
        }

        // Calculate p=(1+alpha).pbar-alpha.ph {Reflection}
        for (int i=0; i<np; ++i)pstar[i]=(1.0 + this.rCoeff)*pbar[i]-this.rCoeff*pp[i][ihi];

        // Calculate y*
        ystar=this.sumSquares(regFun, pstar);

        ++this.nIter;

        // check for y*<yi
        if(ystar < ylo){
            // Calculate p**=(1+gamma).p*-gamma.pbar {Extension}
            for (int i=0; i<np; ++i)p2star[i]=pstar[i]*(1.0D + this.eCoeff)-this.eCoeff*pbar[i];
            // Calculate y**
            y2star=this.sumSquares(regFun, p2star);
            ++this.nIter;
            if(y2star < ylo){
                // Replace ph by p**
                for (int i=0; i<np; ++i)pp[i][ihi] = p2star[i];
                yy[ihi] = y2star;
            }
            else{
                //Replace ph by p*
                for (int i=0; i<np; ++i)pp[i][ihi]=pstar[i];
                yy[ihi]=ystar;
            }
        }
        else{
            // Check y*>yi, i!=h
            ln=0;
            for (int i=0; i<nnp; ++i)if (i!=ihi && ystar > yy[i]) ++ln;
            if (ln==np ){
                // y*>= all yi; Check if y*>yh
                if(ystar<=yy[ihi]){
                    // Replace ph by p*
                    for (int i=0; i<np; ++i)pp[i][ihi]=pstar[i];
                    yy[ihi]=ystar;
                }
                // Calculate p** =beta.ph+(1-beta)pbar {Contraction}
                for (int i=0; i<np; ++i)p2star[i]=this.cCoeff*pp[i][ihi] + (1.0 - this.cCoeff)*pbar[i];
                // Calculate y**
                y2star=this.sumSquares(regFun, p2star);
                ++this.nIter;
                // Check if y**>yh
                if(y2star>yy[ihi]){
                    //Replace all pi by (pi+pl)/2 {shrink the whole simplex towards the lowest apex}

                    for (int j=0; j<nnp; ++j){
                        for (int i=0; i<np; ++i){
                            pp[i][j]=0.5*(pp[i][j] + pp[i][ilo]);
                            pmin[i]=pp[i][j];
                        }
                        yy[j]=this.sumSquares(regFun, pmin);
                    }
                    this.nIter += nnp;
                }
                else{
                    // Replace ph by p**
                    for (int i=0; i<np; ++i)pp[i][ihi] = p2star[i];
                    yy[ihi] = y2star;
                }
            }
            else{
                // replace ph by p*
                for (int i=0; i<np; ++i)pp[i][ihi]=pstar[i];
                yy[ihi]=ystar;
            }
        }

        // test for convergence
        // calculte sd of simplex and determine the minimum point
        sumnm=0.0;
        ynewlo=yy[0];
        ilo=0;
        for (int i=0; i<nnp; ++i){
            sumnm += yy[i];
            if(ynewlo>yy[i]){
                ynewlo=yy[i];
                ilo=i;
            }
        }
        sumnm /= (double)(nnp);
        summnm=0.0;
        for (int i=0; i<nnp; ++i){
            zn=yy[i]-sumnm;
            summnm += zn*zn;
        }
        curMin=Math.sqrt(summnm/np);

        // test simplex sd
        switch(this.minTest){
            case 0: // terminate if the standard deviation of the sum of squares [unweighted data] or of the chi square values [weighted data]
                    // at the apices of the simplex is less than the tolerance, fTol
                if(curMin<fTol)test=false;
                break;
            case 1: // terminate if the reduced chi square [weighted data] or the reduced sum of squares [unweighted data] at the lowest apex
                    // of the simplex is less than the mean of the absolute values of the dependent variable (y values) multiplied by the tolerance, fTol.
                if(Math.sqrt(ynewlo/this.degreesOfFreedom)<yabsmean*fTol)test=false;
                break;
            default: throw new IllegalArgumentException("Simplex standard deviation test option " + this.minTest + " not recognised");
        }
        this.sumOfSquaresError=ynewlo;
        if(!test){
            // temporary store of best estimates
            for (int i=0; i<np; ++i)pmin[i]=pp[i][ilo];
            yy[nnp-1]=ynewlo;
            // store simplex sd
            this.simplexSd = curMin;
            // test for restart: rebuild the simplex about the current minimum and
            // continue, up to konvge times, to guard against premature convergence
            --jcount;
            if(jcount>0){
                test=true;
                for (int j=0; j<np; ++j){
                    pmin[j]=pmin[j]+step[j];
                    for (int i=0; i<np; ++i)pp[i][j]=pmin[i];
                    yy[j]=this.sumSquares(regFun, pmin);
                    pmin[j]=pmin[j]-step[j];
                }
            }
        }

        // test for reaching allowed number of iterations
        if(test && this.nIter>this.nMax){
            if(!this.supressErrorMessages){
                System.out.println("Maximum iteration number reached, in Regression.simplex(...)");
                System.out.println("without the convergence criterion being satisfied");
                System.out.println("Current parameter estimates and sum of squares values returned");
            }
            this.nlrStatus = false;
            // store current estimates
            for (int i=0; i<np; ++i)pmin[i]=pp[i][ilo];
            yy[nnp-1]=ynewlo;
            test=false;
        }

    }

    // final store of the best estimates, function value at the minimum and number of restarts
    for (int i=0; i<np; ++i){
        pmin[i] = pp[i][ilo];
        this.best[i] = pmin[i]/this.scale[i];
        this.scale[i]=1.0D;  // unscale for statistical methods
    }
    this.fMin=ynewlo;
    this.kRestart=this.konvge-jcount;

    // perform statistical analysis if possible and requested
    if(statFlag){
        if(!this.ignoreDofFcheck)pseudoLinearStats(regFun);
    }
    else{
        for (int i=0; i<np; ++i){
            this.bestSd[i] = Double.NaN;
        }
    }
}
|
---|
2459 |
|
---|
2460 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2461 | public void simplex(RegressionFunction g, double[] start, double[] step, double fTol, int nMax){
|
---|
2462 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2463 | Object regFun = (Object)g;
|
---|
2464 | this.lastMethod=3;
|
---|
2465 | this.userSupplied = true;
|
---|
2466 | this.linNonLin = false;
|
---|
2467 | this.zeroCheck = false;
|
---|
2468 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2469 | this.nelderMead(regFun, start, step, fTol, nMax);
|
---|
2470 | }
|
---|
2471 |
|
---|
2472 |
|
---|
2473 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2474 | // plus plot and output file
|
---|
2475 | public void simplexPlot(RegressionFunction g, double[] start, double[] step, double fTol, int nMax){
|
---|
2476 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2477 | Object regFun = (Object)g;
|
---|
2478 | this.lastMethod=3;
|
---|
2479 | this.userSupplied = true;
|
---|
2480 | this.linNonLin = false;
|
---|
2481 | this.zeroCheck = false;
|
---|
2482 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2483 | this.nelderMead(regFun, start, step, fTol, nMax);
|
---|
2484 | if(!this.supressPrint)this.print();
|
---|
2485 | int flag = 0;
|
---|
2486 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2487 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2488 | }
|
---|
2489 |
|
---|
2490 | // Nelder and Mead simplex
|
---|
2491 | // Default maximum iterations
|
---|
2492 | public void simplex(RegressionFunction g, double[] start, double[] step, double fTol){
|
---|
2493 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2494 | Object regFun = (Object)g;
|
---|
2495 | int nMaxx = this.nMax;
|
---|
2496 | this.lastMethod=3;
|
---|
2497 | this.userSupplied = true;
|
---|
2498 | this.linNonLin = false;
|
---|
2499 | this.zeroCheck = false;
|
---|
2500 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2501 | this.nelderMead(regFun, start, step, fTol, nMaxx);
|
---|
2502 | }
|
---|
2503 |
|
---|
2504 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2505 | // plus plot and output file
|
---|
2506 | // Default maximum iterations
|
---|
2507 | public void simplexPlot(RegressionFunction g, double[] start, double[] step, double fTol){
|
---|
2508 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2509 | this.lastMethod=3;
|
---|
2510 | this.userSupplied = true;
|
---|
2511 | this.linNonLin = false;
|
---|
2512 | this.zeroCheck = false;
|
---|
2513 | this.simplex(g, start, step, fTol);
|
---|
2514 | if(!this.supressPrint)this.print();
|
---|
2515 | int flag = 0;
|
---|
2516 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2517 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2518 | }
|
---|
2519 |
|
---|
2520 | // Nelder and Mead simplex
|
---|
2521 | // Default tolerance
|
---|
2522 | public void simplex(RegressionFunction g, double[] start, double[] step, int nMax){
|
---|
2523 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2524 | Object regFun = (Object)g;
|
---|
2525 | double fToll = this.fTol;
|
---|
2526 | this.lastMethod=3;
|
---|
2527 | this.userSupplied = true;
|
---|
2528 | this.linNonLin = false;
|
---|
2529 | this.zeroCheck = false;
|
---|
2530 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2531 | this.nelderMead(regFun, start, step, fToll, nMax);
|
---|
2532 | }
|
---|
2533 |
|
---|
2534 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2535 | // plus plot and output file
|
---|
2536 | // Default tolerance
|
---|
2537 | public void simplexPlot(RegressionFunction g, double[] start, double[] step, int nMax){
|
---|
2538 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2539 | this.lastMethod=3;
|
---|
2540 | this.userSupplied = true;
|
---|
2541 | this.linNonLin = false;
|
---|
2542 | this.zeroCheck = false;
|
---|
2543 | this.simplex(g, start, step, nMax);
|
---|
2544 | if(!this.supressPrint)this.print();
|
---|
2545 | int flag = 0;
|
---|
2546 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2547 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2548 | }
|
---|
2549 |
|
---|
2550 | // Nelder and Mead simplex
|
---|
2551 | // Default tolerance
|
---|
2552 | // Default maximum iterations
|
---|
2553 | public void simplex(RegressionFunction g, double[] start, double[] step){
|
---|
2554 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2555 | Object regFun = (Object)g;
|
---|
2556 | double fToll = this.fTol;
|
---|
2557 | int nMaxx = this.nMax;
|
---|
2558 | this.lastMethod=3;
|
---|
2559 | this.userSupplied = true;
|
---|
2560 | this.linNonLin = false;
|
---|
2561 | this.zeroCheck = false;
|
---|
2562 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2563 | this.nelderMead(regFun, start, step, fToll, nMaxx);
|
---|
2564 | }
|
---|
2565 |
|
---|
2566 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2567 | // plus plot and output file
|
---|
2568 | // Default tolerance
|
---|
2569 | // Default maximum iterations
|
---|
2570 | public void simplexPlot(RegressionFunction g, double[] start, double[] step){
|
---|
2571 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2572 | this.lastMethod=3;
|
---|
2573 | this.userSupplied = true;
|
---|
2574 | this.linNonLin = false;
|
---|
2575 | this.zeroCheck = false;
|
---|
2576 | this.simplex(g, start, step);
|
---|
2577 | if(!this.supressPrint)this.print();
|
---|
2578 | int flag = 0;
|
---|
2579 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2580 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2581 | }
|
---|
2582 |
|
---|
2583 | // Nelder and Mead simplex
|
---|
2584 | // Default step option - all step[i] = dStep
|
---|
2585 | public void simplex(RegressionFunction g, double[] start, double fTol, int nMax){
|
---|
2586 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2587 | Object regFun = (Object)g;
|
---|
2588 | int n=start.length;
|
---|
2589 | double[] stepp = new double[n];
|
---|
2590 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2591 | this.lastMethod=3;
|
---|
2592 | this.userSupplied = true;
|
---|
2593 | this.linNonLin = false;
|
---|
2594 | this.zeroCheck = false;
|
---|
2595 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2596 | this.nelderMead(regFun, start, stepp, fTol, nMax);
|
---|
2597 | }
|
---|
2598 |
|
---|
2599 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2600 | // plus plot and output file
|
---|
2601 | // Default step option - all step[i] = dStep
|
---|
2602 | public void simplexPlot(RegressionFunction g, double[] start, double fTol, int nMax){
|
---|
2603 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2604 | this.lastMethod=3;
|
---|
2605 | this.userSupplied = true;
|
---|
2606 | this.linNonLin = false;
|
---|
2607 | this.zeroCheck = false;
|
---|
2608 | this.simplex(g, start, fTol, nMax);
|
---|
2609 | if(!this.supressPrint)this.print();
|
---|
2610 | int flag = 0;
|
---|
2611 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2612 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2613 | }
|
---|
2614 |
|
---|
2615 | // Nelder and Mead simplex
|
---|
2616 | // Default maximum iterations
|
---|
2617 | // Default step option - all step[i] = dStep
|
---|
2618 | public void simplex(RegressionFunction g, double[] start, double fTol){
|
---|
2619 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2620 | Object regFun = (Object)g;
|
---|
2621 | int n=start.length;
|
---|
2622 | int nMaxx = this.nMax;
|
---|
2623 | double[] stepp = new double[n];
|
---|
2624 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2625 | this.lastMethod=3;
|
---|
2626 | this.userSupplied = true;
|
---|
2627 | this.linNonLin = false;
|
---|
2628 | this.zeroCheck = false;
|
---|
2629 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2630 | this.nelderMead(regFun, start, stepp, fTol, nMaxx);
|
---|
2631 | }
|
---|
2632 |
|
---|
2633 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2634 | // plus plot and output file
|
---|
2635 | // Default maximum iterations
|
---|
2636 | // Default step option - all step[i] = dStep
|
---|
2637 | public void simplexPlot(RegressionFunction g, double[] start, double fTol){
|
---|
2638 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2639 | this.lastMethod=3;
|
---|
2640 | this.userSupplied = true;
|
---|
2641 | this.linNonLin = false;
|
---|
2642 | this.zeroCheck = false;
|
---|
2643 | this.simplex(g, start, fTol);
|
---|
2644 | if(!this.supressPrint)this.print();
|
---|
2645 | int flag = 0;
|
---|
2646 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2647 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2648 | }
|
---|
2649 |
|
---|
2650 | // Nelder and Mead simplex
|
---|
2651 | // Default tolerance
|
---|
2652 | // Default step option - all step[i] = dStep
|
---|
2653 | public void simplex(RegressionFunction g, double[] start, int nMax){
|
---|
2654 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2655 | Object regFun = (Object)g;
|
---|
2656 | int n=start.length;
|
---|
2657 | double fToll = this.fTol;
|
---|
2658 | double[] stepp = new double[n];
|
---|
2659 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2660 | this.lastMethod=3;
|
---|
2661 | this.userSupplied = true;
|
---|
2662 | this.zeroCheck = false;
|
---|
2663 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2664 | this.nelderMead(regFun, start, stepp, fToll, nMax);
|
---|
2665 | }
|
---|
2666 |
|
---|
2667 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2668 | // plus plot and output file
|
---|
2669 | // Default tolerance
|
---|
2670 | // Default step option - all step[i] = dStep
|
---|
2671 | public void simplexPlot(RegressionFunction g, double[] start, int nMax){
|
---|
2672 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2673 | this.lastMethod=3;
|
---|
2674 | this.userSupplied = true;
|
---|
2675 | this.linNonLin = false;
|
---|
2676 | this.zeroCheck = false;
|
---|
2677 | this.simplex(g, start, nMax);
|
---|
2678 | if(!this.supressPrint)this.print();
|
---|
2679 | int flag = 0;
|
---|
2680 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2681 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2682 | }
|
---|
2683 |
|
---|
2684 | // Nelder and Mead simplex
|
---|
2685 | // Default tolerance
|
---|
2686 | // Default maximum iterations
|
---|
2687 | // Default step option - all step[i] = dStep
|
---|
2688 | public void simplex(RegressionFunction g, double[] start){
|
---|
2689 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplex2 should have been called");
|
---|
2690 | Object regFun = (Object)g;
|
---|
2691 | int n=start.length;
|
---|
2692 | int nMaxx = this.nMax;
|
---|
2693 | double fToll = this.fTol;
|
---|
2694 | double[] stepp = new double[n];
|
---|
2695 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2696 | this.lastMethod=3;
|
---|
2697 | this.userSupplied = true;
|
---|
2698 | this.linNonLin = false;
|
---|
2699 | this.zeroCheck = false;
|
---|
2700 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2701 | this.nelderMead(regFun, start, stepp, fToll, nMaxx);
|
---|
2702 | }
|
---|
2703 |
|
---|
2704 | // Nelder and Mead Simplex Simplex Non-linear Regression
|
---|
2705 | // plus plot and output file
|
---|
2706 | // Default tolerance
|
---|
2707 | // Default maximum iterations
|
---|
2708 | // Default step option - all step[i] = dStep
|
---|
2709 | public void simplexPlot(RegressionFunction g, double[] start){
|
---|
2710 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays\nsimplexPlot2 should have been called");
|
---|
2711 | this.lastMethod=3;
|
---|
2712 | this.userSupplied = true;
|
---|
2713 | this.linNonLin = false;
|
---|
2714 | this.zeroCheck = false;
|
---|
2715 | this.simplex(g, start);
|
---|
2716 | if(!this.supressPrint)this.print();
|
---|
2717 | int flag = 0;
|
---|
2718 | if(this.xData.length<2)flag = this.plotXY(g);
|
---|
2719 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2720 | }
|
---|
2721 |
|
---|
2722 |
|
---|
2723 |
|
---|
2724 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2725 | public void simplex2(RegressionFunction2 g, double[] start, double[] step, double fTol, int nMax){
|
---|
2726 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2727 | Object regFun = (Object)g;
|
---|
2728 | this.lastMethod=3;
|
---|
2729 | this.userSupplied = true;
|
---|
2730 | this.linNonLin = false;
|
---|
2731 | this.zeroCheck = false;
|
---|
2732 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2733 | this.nelderMead(regFun, start, step, fTol, nMax);
|
---|
2734 | }
|
---|
2735 |
|
---|
2736 |
|
---|
2737 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2738 | // plus plot and output file
|
---|
2739 | public void simplexPlot2(RegressionFunction2 g, double[] start, double[] step, double fTol, int nMax){
|
---|
2740 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2741 | Object regFun = (Object)g;
|
---|
2742 | this.lastMethod=3;
|
---|
2743 | this.userSupplied = true;
|
---|
2744 | this.linNonLin = false;
|
---|
2745 | this.zeroCheck = false;
|
---|
2746 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2747 | this.nelderMead(regFun, start, step, fTol, nMax);
|
---|
2748 | if(!this.supressPrint)this.print();
|
---|
2749 | int flag = 0;
|
---|
2750 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2751 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2752 | }
|
---|
2753 |
|
---|
2754 | // Nelder and Mead simplex
|
---|
2755 | // Default maximum iterations
|
---|
2756 | public void simplex2(RegressionFunction2 g, double[] start, double[] step, double fTol){
|
---|
2757 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2758 | Object regFun = (Object)g;
|
---|
2759 | int nMaxx = this.nMax;
|
---|
2760 | this.lastMethod=3;
|
---|
2761 | this.userSupplied = true;
|
---|
2762 | this.linNonLin = false;
|
---|
2763 | this.zeroCheck = false;
|
---|
2764 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2765 | this.nelderMead(regFun, start, step, fTol, nMaxx);
|
---|
2766 | }
|
---|
2767 |
|
---|
2768 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2769 | // plus plot and output file
|
---|
2770 | // Default maximum iterations
|
---|
2771 | public void simplexPlot2(RegressionFunction2 g, double[] start, double[] step, double fTol){
|
---|
2772 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2773 | this.lastMethod=3;
|
---|
2774 | this.userSupplied = true;
|
---|
2775 | this.linNonLin = false;
|
---|
2776 | this.zeroCheck = false;
|
---|
2777 | this.simplex2(g, start, step, fTol);
|
---|
2778 | if(!this.supressPrint)this.print();
|
---|
2779 | int flag = 0;
|
---|
2780 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2781 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2782 | }
|
---|
2783 |
|
---|
2784 | // Nelder and Mead simplex
|
---|
2785 | // Default tolerance
|
---|
2786 | public void simplex2(RegressionFunction2 g, double[] start, double[] step, int nMax){
|
---|
2787 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2788 | Object regFun = (Object)g;
|
---|
2789 | double fToll = this.fTol;
|
---|
2790 | this.lastMethod=3;
|
---|
2791 | this.userSupplied = true;
|
---|
2792 | this.linNonLin = false;
|
---|
2793 | this.zeroCheck = false;
|
---|
2794 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2795 | this.nelderMead(regFun, start, step, fToll, nMax);
|
---|
2796 | }
|
---|
2797 |
|
---|
2798 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2799 | // plus plot and output file
|
---|
2800 | // Default tolerance
|
---|
2801 | public void simplexPlot2(RegressionFunction2 g, double[] start, double[] step, int nMax){
|
---|
2802 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2803 | this.lastMethod=3;
|
---|
2804 | this.userSupplied = true;
|
---|
2805 | this.linNonLin = false;
|
---|
2806 | this.zeroCheck = false;
|
---|
2807 | this.simplex2(g, start, step, nMax);
|
---|
2808 | if(!this.supressPrint)this.print();
|
---|
2809 | int flag = 0;
|
---|
2810 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2811 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2812 | }
|
---|
2813 |
|
---|
2814 | // Nelder and Mead simplex
|
---|
2815 | // Default tolerance
|
---|
2816 | // Default maximum iterations
|
---|
2817 | public void simplex2(RegressionFunction2 g, double[] start, double[] step){
|
---|
2818 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2819 | Object regFun = (Object)g;
|
---|
2820 | double fToll = this.fTol;
|
---|
2821 | int nMaxx = this.nMax;
|
---|
2822 | this.lastMethod=3;
|
---|
2823 | this.userSupplied = true;
|
---|
2824 | this.linNonLin = false;
|
---|
2825 | this.zeroCheck = false;
|
---|
2826 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2827 | this.nelderMead(regFun, start, step, fToll, nMaxx);
|
---|
2828 | }
|
---|
2829 |
|
---|
2830 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2831 | // plus plot and output file
|
---|
2832 | // Default tolerance
|
---|
2833 | // Default maximum iterations
|
---|
2834 | public void simplexPlot2(RegressionFunction2 g, double[] start, double[] step){
|
---|
2835 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2836 | this.lastMethod=3;
|
---|
2837 | this.userSupplied = true;
|
---|
2838 | this.linNonLin = false;
|
---|
2839 | this.zeroCheck = false;
|
---|
2840 | this.simplex2(g, start, step);
|
---|
2841 | if(!this.supressPrint)this.print();
|
---|
2842 | int flag = 0;
|
---|
2843 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2844 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2845 | }
|
---|
2846 |
|
---|
2847 | // Nelder and Mead simplex
|
---|
2848 | // Default step option - all step[i] = dStep
|
---|
2849 | public void simplex2(RegressionFunction2 g, double[] start, double fTol, int nMax){
|
---|
2850 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2851 | Object regFun = (Object)g;
|
---|
2852 | int n=start.length;
|
---|
2853 | double[] stepp = new double[n];
|
---|
2854 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2855 | this.lastMethod=3;
|
---|
2856 | this.userSupplied = true;
|
---|
2857 | this.linNonLin = false;
|
---|
2858 | this.zeroCheck = false;
|
---|
2859 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2860 | this.nelderMead(regFun, start, stepp, fTol, nMax);
|
---|
2861 | }
|
---|
2862 |
|
---|
2863 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2864 | // plus plot and output file
|
---|
2865 | // Default step option - all step[i] = dStep
|
---|
2866 | public void simplexPlot2(RegressionFunction2 g, double[] start, double fTol, int nMax){
|
---|
2867 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2868 | this.lastMethod=3;
|
---|
2869 | this.userSupplied = true;
|
---|
2870 | this.linNonLin = false;
|
---|
2871 | this.zeroCheck = false;
|
---|
2872 | this.simplex2(g, start, fTol, nMax);
|
---|
2873 | if(!this.supressPrint)this.print();
|
---|
2874 | int flag = 0;
|
---|
2875 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2876 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2877 | }
|
---|
2878 |
|
---|
2879 | // Nelder and Mead simplex
|
---|
2880 | // Default maximum iterations
|
---|
2881 | // Default step option - all step[i] = dStep
|
---|
2882 | public void simplex2(RegressionFunction2 g, double[] start, double fTol){
|
---|
2883 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2884 | Object regFun = (Object)g;
|
---|
2885 | int n=start.length;
|
---|
2886 | int nMaxx = this.nMax;
|
---|
2887 | double[] stepp = new double[n];
|
---|
2888 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2889 | this.lastMethod=3;
|
---|
2890 | this.userSupplied = true;
|
---|
2891 | this.linNonLin = false;
|
---|
2892 | this.zeroCheck = false;
|
---|
2893 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2894 | this.nelderMead(regFun, start, stepp, fTol, nMaxx);
|
---|
2895 | }
|
---|
2896 |
|
---|
2897 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2898 | // plus plot and output file
|
---|
2899 | // Default maximum iterations
|
---|
2900 | // Default step option - all step[i] = dStep
|
---|
2901 | public void simplexPlot2(RegressionFunction2 g, double[] start, double fTol){
|
---|
2902 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2903 | this.lastMethod=3;
|
---|
2904 | this.userSupplied = true;
|
---|
2905 | this.linNonLin = false;
|
---|
2906 | this.zeroCheck = false;
|
---|
2907 | this.simplex2(g, start, fTol);
|
---|
2908 | if(!this.supressPrint)this.print();
|
---|
2909 | int flag = 0;
|
---|
2910 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2911 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2912 | }
|
---|
2913 |
|
---|
2914 | // Nelder and Mead simplex
|
---|
2915 | // Default tolerance
|
---|
2916 | // Default step option - all step[i] = dStep
|
---|
2917 | public void simplex2(RegressionFunction2 g, double[] start, int nMax){
|
---|
2918 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2919 | Object regFun = (Object)g;
|
---|
2920 | int n=start.length;
|
---|
2921 | double fToll = this.fTol;
|
---|
2922 | double[] stepp = new double[n];
|
---|
2923 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2924 | this.lastMethod=3;
|
---|
2925 | this.userSupplied = true;
|
---|
2926 | this.zeroCheck = false;
|
---|
2927 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2928 | this.nelderMead(regFun, start, stepp, fToll, nMax);
|
---|
2929 | }
|
---|
2930 |
|
---|
2931 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2932 | // plus plot and output file
|
---|
2933 | // Default tolerance
|
---|
2934 | // Default step option - all step[i] = dStep
|
---|
2935 | public void simplexPlot2(RegressionFunction2 g, double[] start, int nMax){
|
---|
2936 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2937 | this.lastMethod=3;
|
---|
2938 | this.userSupplied = true;
|
---|
2939 | this.linNonLin = false;
|
---|
2940 | this.zeroCheck = false;
|
---|
2941 | this.simplex2(g, start, nMax);
|
---|
2942 | if(!this.supressPrint)this.print();
|
---|
2943 | int flag = 0;
|
---|
2944 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2945 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2946 | }
|
---|
2947 |
|
---|
2948 | // Nelder and Mead simplex
|
---|
2949 | // Default tolerance
|
---|
2950 | // Default maximum iterations
|
---|
2951 | // Default step option - all step[i] = dStep
|
---|
2952 | public void simplex2(RegressionFunction2 g, double[] start){
|
---|
2953 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2954 | Object regFun = (Object)g;
|
---|
2955 | int n=start.length;
|
---|
2956 | int nMaxx = this.nMax;
|
---|
2957 | double fToll = this.fTol;
|
---|
2958 | double[] stepp = new double[n];
|
---|
2959 | for(int i=0; i<n;i++)stepp[i]=this.dStep*start[i];
|
---|
2960 | this.lastMethod=3;
|
---|
2961 | this.userSupplied = true;
|
---|
2962 | this.linNonLin = false;
|
---|
2963 | this.zeroCheck = false;
|
---|
2964 | this.degreesOfFreedom = this.nData - start.length;
|
---|
2965 | this.nelderMead(regFun, start, stepp, fToll, nMaxx);
|
---|
2966 | }
|
---|
2967 |
|
---|
2968 | // Nelder and Mead Simplex Simplex2 Non-linear Regression
|
---|
2969 | // plus plot and output file
|
---|
2970 | // Default tolerance
|
---|
2971 | // Default maximum iterations
|
---|
2972 | // Default step option - all step[i] = dStep
|
---|
2973 | public void simplexPlot2(RegressionFunction2 g, double[] start){
|
---|
2974 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
2975 | this.lastMethod=3;
|
---|
2976 | this.userSupplied = true;
|
---|
2977 | this.linNonLin = false;
|
---|
2978 | this.zeroCheck = false;
|
---|
2979 | this.simplex2(g, start);
|
---|
2980 | if(!this.supressPrint)this.print();
|
---|
2981 | int flag = 0;
|
---|
2982 | if(this.xData.length<2)flag = this.plotXY2(g);
|
---|
2983 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
2984 | }
|
---|
2985 |
|
---|
// Calculate the sum of squares of the weighted residuals for non-linear regression.
// regFun: the user-supplied function, cast to RegressionFunction (single y array)
//         or RegressionFunction2 (multiple y arrays) according to this.multipleY
// x:      the current internally-scaled parameter estimates
// Returns the weighted sum of squares; if any single or 'parameter sum' constraint
// is violated, returns instead the last unconstrained sum of squares plus a
// quadratic penalty term, and the function itself is not evaluated.
protected double sumSquares(Object regFun, double[] x){
    RegressionFunction g1 = null;
    RegressionFunction2 g2 = null;
    // choose the appropriate user-function interface for this data layout
    if(this.multipleY){
        g2 = (RegressionFunction2)regFun;
    }
    else{
        g1 = (RegressionFunction)regFun;
    }

    double ss = -3.0D;    // sentinel; always overwritten before being returned
    double[] param = new double[this.nTerms];
    double[] xd = new double[this.nXarrays];
    // undo the internal scaling before calling the user's function
    for(int i=0; i<this.nTerms; i++)param[i]=x[i]/this.scale[i];

    // single parameter penalty functions:
    // a violated constraint returns the last unconstrained SS plus a quadratic 'cliff'
    double tempFunctVal = this.lastSSnoConstraint;
    boolean test=true;    // remains true only if no constraint is violated
    if(this.penalty){
        int k=0;
        for(int i=0; i<this.nConstraints; i++){
            k = this.penaltyParam[i];
            switch(penaltyCheck[i]){
                case -1: // parameter constrained to lie above a given constraint value
                    if(param[k]<constraints[i]){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(constraints[i]-param[k]);
                        test=false;
                    }
                    break;
                case 0: // parameter constrained to lie within a given tolerance about a constraint value
                    if(param[k]<constraints[i]*(1.0-this.constraintTolerance)){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(constraints[i]*(1.0-this.constraintTolerance)-param[k]);
                        test=false;
                    }
                    if(param[k]>constraints[i]*(1.0+this.constraintTolerance)){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(param[k]-constraints[i]*(1.0+this.constraintTolerance));
                        test=false;
                    }
                    break;
                case 1: // parameter constrained to lie below a given constraint value
                    if(param[k]>constraints[i]){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(param[k]-constraints[i]);
                        test=false;
                    }
                    break;
                default: throw new IllegalArgumentException("The " + i + "th penalty check " + penaltyCheck[i] + " not recognised");

            }
        }
    }

    // multiple parameter penalty functions:
    // constraints on a signed sum of several parameters, penalised in the same way
    if(this.sumPenalty){
        int kk = 0;
        double pSign = 0;
        for(int i=0; i<this.nSumConstraints; i++){
            // accumulate the signed sum of the designated parameters
            double sumPenaltySum = 0.0D;
            for(int j=0; j<this.sumPenaltyNumber[i]; j++){
                kk = this.sumPenaltyParam[i][j];
                pSign = this.sumPlusOrMinus[i][j];
                sumPenaltySum += param[kk]*pSign;
            }
            switch(this.sumPenaltyCheck[i]){
                case -1: // designated 'parameter sum' constrained to lie above a given constraint value
                    if(sumPenaltySum<sumConstraints[i]){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(sumConstraints[i]-sumPenaltySum);
                        test=false;
                    }
                    break;
                case 0: // designated 'parameter sum' constrained to lie within a given tolerance about a given constraint value
                    if(sumPenaltySum<sumConstraints[i]*(1.0-this.constraintTolerance)){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(sumConstraints[i]*(1.0-this.constraintTolerance)-sumPenaltySum);
                        test=false;
                    }
                    if(sumPenaltySum>sumConstraints[i]*(1.0+this.constraintTolerance)){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(sumPenaltySum-sumConstraints[i]*(1.0+this.constraintTolerance));
                        test=false;
                    }
                    break;
                case 1: // designated 'parameter sum' constrained to lie below a given constraint value
                    if(sumPenaltySum>sumConstraints[i]){
                        ss = tempFunctVal + this.penaltyWeight*Fmath.square(sumPenaltySum-sumConstraints[i]);
                        test=false;
                    }
                    break;
                default: throw new IllegalArgumentException("The " + i + "th summation penalty check " + sumPenaltyCheck[i] + " not recognised");
            }
        }
    }

    // call function calculation and calculate the sum of squares if constraints have not intervened
    if(test){
        ss = 0.0D;
        for(int i=0; i<this.nData; i++){
            for(int j=0; j<nXarrays; j++)xd[j]=this.xData[j][i];
            if(!this.multipleY){
                ss += Fmath.square((this.yData[i] - g1.function(param, xd))/this.weight[i]);
            }
            else{
                ss += Fmath.square((this.yData[i] - g2.function(param, xd, i))/this.weight[i]);
            }

        }
        // remember the unconstrained value for use as the penalty base next call
        this.lastSSnoConstraint = ss;

    }

    // return sum of squares
    return ss;
}
|
---|
3098 |
|
---|
3099 |
|
---|
3100 | // add a single parameter constraint boundary for the non-linear regression
|
---|
3101 | public void addConstraint(int paramIndex, int conDir, double constraint){
|
---|
3102 | this.penalty=true;
|
---|
3103 |
|
---|
3104 | // First element reserved for method number if other methods than 'cliff' are added later
|
---|
3105 | if(this.penalties.isEmpty())this.penalties.add(new Integer(this.constraintMethod));
|
---|
3106 |
|
---|
3107 | // add constraint
|
---|
3108 | if(penalties.size()==1){
|
---|
3109 | this.penalties.add(new Integer(1));
|
---|
3110 | }
|
---|
3111 | else{
|
---|
3112 | int nPC = ((Integer)this.penalties.get(1)).intValue();
|
---|
3113 | nPC++;
|
---|
3114 | this.penalties.set(1, new Integer(nPC));
|
---|
3115 | }
|
---|
3116 | this.penalties.add(new Integer(paramIndex));
|
---|
3117 | this.penalties.add(new Integer(conDir));
|
---|
3118 | this.penalties.add(new Double(constraint));
|
---|
3119 | if(paramIndex>this.maxConstraintIndex)this.maxConstraintIndex = paramIndex;
|
---|
3120 | }
|
---|
3121 |
|
---|
3122 |
|
---|
3123 | // add a multiple parameter constraint boundary for the non-linear regression
|
---|
3124 | public void addConstraint(int[] paramIndices, int[] plusOrMinus, int conDir, double constraint){
|
---|
3125 | ArrayMaths am = new ArrayMaths(plusOrMinus);
|
---|
3126 | double[] dpom = am.getArray_as_double();
|
---|
3127 | addConstraint(paramIndices, dpom, conDir, constraint);
|
---|
3128 | }
|
---|
3129 |
|
---|
3130 | // add a multiple parameter constraint boundary for the non-linear regression
|
---|
3131 | public void addConstraint(int[] paramIndices, double[] plusOrMinus, int conDir, double constraint){
|
---|
3132 | int nCon = paramIndices.length;
|
---|
3133 | int nPorM = plusOrMinus.length;
|
---|
3134 | if(nCon!=nPorM)throw new IllegalArgumentException("num of parameters, " + nCon + ", does not equal number of parameter signs, " + nPorM);
|
---|
3135 | this.sumPenalty=true;
|
---|
3136 |
|
---|
3137 | // First element reserved for method number if other methods than 'cliff' are added later
|
---|
3138 | if(this.sumPenalties.isEmpty())this.sumPenalties.add(new Integer(this.constraintMethod));
|
---|
3139 |
|
---|
3140 | // add constraint
|
---|
3141 | if(sumPenalties.size()==1){
|
---|
3142 | this.sumPenalties.add(new Integer(1));
|
---|
3143 | }
|
---|
3144 | else{
|
---|
3145 | int nPC = ((Integer)this.sumPenalties.get(1)).intValue();
|
---|
3146 | nPC++;
|
---|
3147 | this.sumPenalties.set(1, new Integer(nPC));
|
---|
3148 | }
|
---|
3149 | this.sumPenalties.add(new Integer(nCon));
|
---|
3150 | this.sumPenalties.add(paramIndices);
|
---|
3151 | this.sumPenalties.add(plusOrMinus);
|
---|
3152 | this.sumPenalties.add(new Integer(conDir));
|
---|
3153 | this.sumPenalties.add(new Double(constraint));
|
---|
3154 | ArrayMaths am = new ArrayMaths(paramIndices);
|
---|
3155 | int maxI = am.getMaximum_as_int();
|
---|
3156 | if(maxI>this.maxConstraintIndex)this.maxConstraintIndex = maxI;
|
---|
3157 | }
|
---|
3158 |
|
---|
3159 |
|
---|
3160 | // remove all constraint boundaries for the non-linear regression
|
---|
3161 | public void removeConstraints(){
|
---|
3162 |
|
---|
3163 | // check if single parameter constraints already set
|
---|
3164 | if(!this.penalties.isEmpty()){
|
---|
3165 | int m=this.penalties.size();
|
---|
3166 |
|
---|
3167 | // remove single parameter constraints
|
---|
3168 | for(int i=m-1; i>=0; i--){
|
---|
3169 | this.penalties.remove(i);
|
---|
3170 | }
|
---|
3171 | }
|
---|
3172 | this.penalty = false;
|
---|
3173 | this.nConstraints = 0;
|
---|
3174 |
|
---|
3175 | // check if mutiple parameter constraints already set
|
---|
3176 | if(!this.sumPenalties.isEmpty()){
|
---|
3177 | int m=this.sumPenalties.size();
|
---|
3178 |
|
---|
3179 | // remove multiple parameter constraints
|
---|
3180 | for(int i=m-1; i>=0; i--){
|
---|
3181 | this.sumPenalties.remove(i);
|
---|
3182 | }
|
---|
3183 | }
|
---|
3184 | this.sumPenalty = false;
|
---|
3185 | this.nSumConstraints = 0;
|
---|
3186 | this.maxConstraintIndex = -1;
|
---|
3187 | }
|
---|
3188 |
|
---|
3189 |
|
---|
3190 | // Reset the tolerance used in a fixed value constraint
|
---|
3191 | public void setConstraintTolerance(double tolerance){
|
---|
3192 | this.constraintTolerance = tolerance;
|
---|
3193 | }
|
---|
3194 |
|
---|
3195 |
|
---|
3196 | // linear statistics applied to a non-linear regression
|
---|
3197 | protected int pseudoLinearStats(Object regFun){
|
---|
3198 | double f1 = 0.0D, f2 = 0.0D, f3 = 0.0D, f4 = 0.0D; // intermdiate values in numerical differentiation
|
---|
3199 | int flag = 0; // returned as 0 if method fully successful;
|
---|
3200 | // negative if partially successful or unsuccessful: check posVarFlag and invertFlag
|
---|
3201 | // -1 posVarFlag or invertFlag is false;
|
---|
3202 | // -2 posVarFlag and invertFlag are false
|
---|
3203 | int np = this.nTerms;
|
---|
3204 |
|
---|
3205 | double[] f = new double[np];
|
---|
3206 | double[] pmin = new double[np];
|
---|
3207 | double[] coeffSd = new double[np];
|
---|
3208 | double[] xd = new double[this.nXarrays];
|
---|
3209 | double[][]stat = new double[np][np];
|
---|
3210 | pseudoSd = new double[np];
|
---|
3211 |
|
---|
3212 | Double temp = null;
|
---|
3213 |
|
---|
3214 | this.grad = new double[np][2];
|
---|
3215 | this.covar = new double[np][np];
|
---|
3216 | this.corrCoeff = new double[np][np];
|
---|
3217 |
|
---|
3218 | // get best estimates
|
---|
3219 | pmin = Conv.copy(best);
|
---|
3220 |
|
---|
3221 | // gradient both sides of the minimum
|
---|
3222 | double hold0 = 1.0D;
|
---|
3223 | double hold1 = 1.0D;
|
---|
3224 | for (int i=0;i<np; ++i){
|
---|
3225 | for (int k=0;k<np; ++k){
|
---|
3226 | f[k]=pmin[k];
|
---|
3227 | }
|
---|
3228 | hold0=pmin[i];
|
---|
3229 | if(hold0==0.0D){
|
---|
3230 | hold0=this.stepH[i];
|
---|
3231 | this.zeroCheck=true;
|
---|
3232 | }
|
---|
3233 | f[i]=hold0*(1.0D - this.delta);
|
---|
3234 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3235 | f1=sumSquares(regFun, f);
|
---|
3236 | f[i]=hold0*(1.0 + this.delta);
|
---|
3237 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3238 | f2=sumSquares(regFun, f);
|
---|
3239 | this.grad[i][0]=(this.fMin-f1)/Math.abs(this.delta*hold0);
|
---|
3240 | this.grad[i][1]=(f2-this.fMin)/Math.abs(this.delta*hold0);
|
---|
3241 | }
|
---|
3242 |
|
---|
3243 | // second patial derivatives at the minimum
|
---|
3244 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3245 | for (int i=0;i<np; ++i){
|
---|
3246 | for (int j=0;j<np; ++j){
|
---|
3247 | for (int k=0;k<np; ++k){
|
---|
3248 | f[k]=pmin[k];
|
---|
3249 | }
|
---|
3250 | hold0=f[i];
|
---|
3251 | if(hold0==0.0D){
|
---|
3252 | hold0=this.stepH[i];
|
---|
3253 | this.zeroCheck=true;
|
---|
3254 | }
|
---|
3255 | f[i]=hold0*(1.0 + this.delta/2.0D);
|
---|
3256 | hold0=f[j];
|
---|
3257 | if(hold0==0.0D){
|
---|
3258 | hold0=this.stepH[j];
|
---|
3259 | this.zeroCheck=true;
|
---|
3260 | }
|
---|
3261 | f[j]=hold0*(1.0 + this.delta/2.0D);
|
---|
3262 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3263 | f1=sumSquares(regFun, f);
|
---|
3264 | f[i]=pmin[i];
|
---|
3265 | f[j]=pmin[j];
|
---|
3266 | hold0=f[i];
|
---|
3267 | if(hold0==0.0D){
|
---|
3268 | hold0=this.stepH[i];
|
---|
3269 | this.zeroCheck=true;
|
---|
3270 | }
|
---|
3271 | f[i]=hold0*(1.0 - this.delta/2.0D);
|
---|
3272 | hold0=f[j];
|
---|
3273 | if(hold0==0.0D){
|
---|
3274 | hold0=this.stepH[j];
|
---|
3275 | this.zeroCheck=true;
|
---|
3276 | }
|
---|
3277 | f[j]=hold0*(1.0 + this.delta/2.0D);
|
---|
3278 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3279 | f2=sumSquares(regFun, f);
|
---|
3280 | f[i]=pmin[i];
|
---|
3281 | f[j]=pmin[j];
|
---|
3282 | hold0=f[i];
|
---|
3283 | if(hold0==0.0D){
|
---|
3284 | hold0=this.stepH[i];
|
---|
3285 | this.zeroCheck=true;
|
---|
3286 | }
|
---|
3287 | f[i]=hold0*(1.0 + this.delta/2.0D);
|
---|
3288 | hold0=f[j];
|
---|
3289 | if(hold0==0.0D){
|
---|
3290 | hold0=this.stepH[j];
|
---|
3291 | this.zeroCheck=true;
|
---|
3292 | }
|
---|
3293 | f[j]=hold0*(1.0 - this.delta/2.0D);
|
---|
3294 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3295 | f3=sumSquares(regFun, f);
|
---|
3296 | f[i]=pmin[i];
|
---|
3297 | f[j]=pmin[j];
|
---|
3298 | hold0=f[i];
|
---|
3299 | if(hold0==0.0D){
|
---|
3300 | hold0=this.stepH[i];
|
---|
3301 | this.zeroCheck=true;
|
---|
3302 | }
|
---|
3303 | f[i]=hold0*(1.0 - this.delta/2.0D);
|
---|
3304 | hold0=f[j];
|
---|
3305 | if(hold0==0.0D){
|
---|
3306 | hold0=this.stepH[j];
|
---|
3307 | this.zeroCheck=true;
|
---|
3308 | }
|
---|
3309 | f[j]=hold0*(1.0 - this.delta/2.0D);
|
---|
3310 | this.lastSSnoConstraint=this.sumOfSquaresError;
|
---|
3311 | f4=sumSquares(regFun, f);
|
---|
3312 | stat[i][j]=(f1-f2-f3+f4)/(this.delta*this.delta);
|
---|
3313 | }
|
---|
3314 | }
|
---|
3315 |
|
---|
3316 | double ss=0.0D;
|
---|
3317 | double sc=0.0D;
|
---|
3318 | for(int i=0; i<this.nData; i++){
|
---|
3319 | for(int j=0; j<nXarrays; j++)xd[j]=this.xData[j][i];
|
---|
3320 | if(this.multipleY){
|
---|
3321 | this.yCalc[i] = ((RegressionFunction2)regFun).function(pmin, xd, i);
|
---|
3322 | }
|
---|
3323 | else{
|
---|
3324 | this.yCalc[i] = ((RegressionFunction)regFun).function(pmin, xd);
|
---|
3325 | }
|
---|
3326 | this.residual[i] = this.yCalc[i]-this.yData[i];
|
---|
3327 | ss += Fmath.square(this.residual[i]);
|
---|
3328 | this.residualW[i] = this.residual[i]/this.weight[i];
|
---|
3329 | sc += Fmath.square(this.residualW[i]);
|
---|
3330 | }
|
---|
3331 | this.sumOfSquaresError = ss;
|
---|
3332 | double varY = ss/(this.nData-np);
|
---|
3333 | double sdY = Math.sqrt(varY);
|
---|
3334 | this.chiSquare=sc;
|
---|
3335 | this.reducedChiSquare=sc/(this.nData-np);
|
---|
3336 |
|
---|
3337 | // calculate reduced sum of squares
|
---|
3338 | double red=1.0D;
|
---|
3339 | if(!this.weightOpt && !this.trueFreq)red=this.sumOfSquaresError/(this.nData-np);
|
---|
3340 |
|
---|
3341 | // calculate pseudo errors - reduced sum of squares over second partial derivative
|
---|
3342 | for(int i=0; i<np; i++){
|
---|
3343 | pseudoSd[i] = (2.0D*this.delta*red*Math.abs(pmin[i]))/(grad[i][1]-grad[i][0]);
|
---|
3344 | if(pseudoSd[i]>=0.0D){
|
---|
3345 | pseudoSd[i] = Math.sqrt(pseudoSd[i]);
|
---|
3346 | }
|
---|
3347 | else{
|
---|
3348 | pseudoSd[i] = Double.NaN;
|
---|
3349 | }
|
---|
3350 | }
|
---|
3351 |
|
---|
3352 | // calculate covariance matrix
|
---|
3353 | if(np==1){
|
---|
3354 | hold0=pmin[0];
|
---|
3355 | if(hold0==0.0D)hold0=this.stepH[0];
|
---|
3356 | stat[0][0]=1.0D/stat[0][0];
|
---|
3357 | this.covar[0][0] = stat[0][0]*red*hold0*hold0;
|
---|
3358 | if(covar[0][0]>=0.0D){
|
---|
3359 | coeffSd[0]=Math.sqrt(this.covar[0][0]);
|
---|
3360 | corrCoeff[0][0]=1.0D;
|
---|
3361 | }
|
---|
3362 | else{
|
---|
3363 | coeffSd[0]=Double.NaN;
|
---|
3364 | corrCoeff[0][0]=Double.NaN;
|
---|
3365 | this.posVarFlag=false;
|
---|
3366 | }
|
---|
3367 | }
|
---|
3368 | else{
|
---|
3369 | Matrix cov = new Matrix(stat);
|
---|
3370 | if(this.supressErrorMessages)cov.supressErrorMessage();
|
---|
3371 | double determinant = cov.determinant();
|
---|
3372 | if(determinant==0){
|
---|
3373 | this.invertFlag=false;
|
---|
3374 | }
|
---|
3375 | else{
|
---|
3376 | cov = cov.inverse();
|
---|
3377 | this.invertFlag = cov.getMatrixCheck();
|
---|
3378 | }
|
---|
3379 | if(this.invertFlag==false)flag--;
|
---|
3380 | stat = cov.getArrayCopy();
|
---|
3381 |
|
---|
3382 | this.posVarFlag=true;
|
---|
3383 | if (this.invertFlag){
|
---|
3384 | for (int i=0; i<np; ++i){
|
---|
3385 | hold0=pmin[i];
|
---|
3386 | if(hold0==0.0D)hold0=this.stepH[i];
|
---|
3387 | for (int j=i; j<np;++j){
|
---|
3388 | hold1=pmin[j];
|
---|
3389 | if(hold1==0.0D)hold1=this.stepH[j];
|
---|
3390 | this.covar[i][j] = 2.0D*stat[i][j]*red*hold0*hold1;
|
---|
3391 | this.covar[j][i] = this.covar[i][j];
|
---|
3392 | }
|
---|
3393 | if(covar[i][i]>=0.0D){
|
---|
3394 | coeffSd[i]=Math.sqrt(this.covar[i][i]);
|
---|
3395 | }
|
---|
3396 | else{
|
---|
3397 | coeffSd[i]=Double.NaN;
|
---|
3398 | this.posVarFlag=false;
|
---|
3399 | }
|
---|
3400 | }
|
---|
3401 |
|
---|
3402 | for (int i=0; i<np; ++i){
|
---|
3403 | for (int j=0; j<np; ++j){
|
---|
3404 | if((coeffSd[i]!= Double.NaN) && (coeffSd[j]!= Double.NaN)){
|
---|
3405 | this.corrCoeff[i][j] = this.covar[i][j]/(coeffSd[i]*coeffSd[j]);
|
---|
3406 | }
|
---|
3407 | else{
|
---|
3408 | this.corrCoeff[i][j]= Double.NaN;
|
---|
3409 | }
|
---|
3410 | }
|
---|
3411 | }
|
---|
3412 | }
|
---|
3413 | else{
|
---|
3414 | for (int i=0; i<np; ++i){
|
---|
3415 | for (int j=0; j<np;++j){
|
---|
3416 | this.covar[i][j] = Double.NaN;
|
---|
3417 | this.corrCoeff[i][j] = Double.NaN;
|
---|
3418 | }
|
---|
3419 | coeffSd[i]=Double.NaN;
|
---|
3420 | }
|
---|
3421 | }
|
---|
3422 | }
|
---|
3423 | if(this.posVarFlag==false)flag--;
|
---|
3424 |
|
---|
3425 | for(int i=0; i<this.nTerms; i++){
|
---|
3426 | this.bestSd[i] = coeffSd[i];
|
---|
3427 | this.tValues[i] = this.best[i]/this.bestSd[i];
|
---|
3428 | double atv = Math.abs(this.tValues[i]);
|
---|
3429 | if(atv!=atv){
|
---|
3430 | this.pValues[i] = Double.NaN;
|
---|
3431 | }
|
---|
3432 | else{
|
---|
3433 | this.pValues[i] = 1.0 - Stat.studentTcdf(-atv, atv, this.degreesOfFreedom);
|
---|
3434 | }
|
---|
3435 | }
|
---|
3436 |
|
---|
3437 | if(this.nXarrays==1 && this.nYarrays==1){
|
---|
3438 | this.xyR = Stat.corrCoeff(this.xData[0], this.yData, this.weight);
|
---|
3439 | }
|
---|
3440 | this.yyR = Stat.corrCoeff(this.yCalc, this.yData, this.weight);
|
---|
3441 |
|
---|
3442 | // Coefficient of determination
|
---|
3443 | this.yMean = Stat.mean(this.yData);
|
---|
3444 | this.yWeightedMean = Stat.mean(this.yData, this.weight);
|
---|
3445 |
|
---|
3446 | this.sumOfSquaresTotal = 0.0;
|
---|
3447 | for(int i=0; i<this.nData; i++){
|
---|
3448 | this.sumOfSquaresTotal += Fmath.square((this.yData[i] - this.yWeightedMean)/weight[i]);
|
---|
3449 | }
|
---|
3450 |
|
---|
3451 | this.sumOfSquaresRegrn = this.sumOfSquaresTotal - this.chiSquare;
|
---|
3452 | if(this.sumOfSquaresRegrn<0.0)this.sumOfSquaresRegrn=0.0;
|
---|
3453 |
|
---|
3454 | this.multR = this.sumOfSquaresRegrn/this.sumOfSquaresTotal;
|
---|
3455 |
|
---|
3456 | // Calculate adjusted multiple coefficient of determination
|
---|
3457 | this.adjustedR = 1.0 - (1.0 - this.multR)*(this.nData - 1 )/(this.nData - this.nXarrays - 1);
|
---|
3458 |
|
---|
3459 | // F-ratio
|
---|
3460 | this.multipleF = this.multR*(this.nData-this.nXarrays-1.0)/((1.0D-this.multR)*this.nXarrays);
|
---|
3461 | if(this.multipleF>=0.0)this.multipleFprob = Stat.fTestProb(this.multipleF, this.nXarrays, this.nData-this.nXarrays-1);
|
---|
3462 |
|
---|
3463 | return flag;
|
---|
3464 |
|
---|
3465 | }
|
---|
3466 |
|
---|
3467 | // Print the results of the regression
|
---|
3468 | // File name provided
|
---|
3469 | // prec = truncation precision
|
---|
3470 | public void print(String filename, int prec){
|
---|
3471 | this.prec = prec;
|
---|
3472 | this.print(filename);
|
---|
3473 | }
|
---|
3474 |
|
---|
3475 | // Print the results of the regression
|
---|
3476 | // No file name provided
|
---|
3477 | // prec = truncation precision
|
---|
3478 | public void print(int prec){
|
---|
3479 | this.prec = prec;
|
---|
3480 | String filename="RegressionOutput.txt";
|
---|
3481 | this.print(filename);
|
---|
3482 | }
|
---|
3483 |
|
---|
3484 | // Print the results of the regression
|
---|
3485 | // File name provided
|
---|
3486 | // default value for truncation precision
|
---|
3487 | public void print(String filename){
|
---|
3488 | if(filename.indexOf('.')==-1)filename = filename+".txt";
|
---|
3489 | FileOutput fout = new FileOutput(filename, 'n');
|
---|
3490 | fout.dateAndTimeln(filename);
|
---|
3491 | fout.println(this.graphTitle);
|
---|
3492 | paraName = new String[this.nTerms];
|
---|
3493 | if(lastMethod==38)paraName = new String[3];
|
---|
3494 | if(this.bestPolyFlag)fout.println("This is the best fit found by the method bestPolynomial");
|
---|
3495 | if(weightOpt){
|
---|
3496 | fout.println("Weighted Least Squares Minimisation");
|
---|
3497 | }
|
---|
3498 | else{
|
---|
3499 | fout.println("Unweighted Least Squares Minimisation");
|
---|
3500 | }
|
---|
3501 | switch(this.lastMethod){
|
---|
3502 | case 0: fout.println("Linear Regression with intercept");
|
---|
3503 | fout.println("y = c[0] + c[1]*x1 + c[2]*x2 +c[3]*x3 + . . .");
|
---|
3504 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3505 | this.linearPrint(fout);
|
---|
3506 | break;
|
---|
3507 | case 1: fout.println("Polynomial (with degree = " + (nTerms-1) + "), Fitting: Linear Regression");
|
---|
3508 | fout.println("y = c[0] + c[1]*x + c[2]*x^2 +c[3]*x^3 + . . .");
|
---|
3509 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3510 | this.linearPrint(fout);
|
---|
3511 | break;
|
---|
3512 | case 2: fout.println("Generalised linear regression");
|
---|
3513 | fout.println("y = c[0]*f1(x) + c[1]*f2(x) + c[2]*f3(x) + . . .");
|
---|
3514 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3515 | this.linearPrint(fout);
|
---|
3516 | break;
|
---|
3517 | case 3: fout.println("Nelder and Mead Simplex Non-linear Regression");
|
---|
3518 | fout.println("y = f(x1, x2, x3 . . ., c[0], c[1], c[2] . . .");
|
---|
3519 | fout.println("y is non-linear with respect to the c[i]");
|
---|
3520 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3521 | this.nonLinearPrint(fout);
|
---|
3522 | break;
|
---|
3523 | case 4: fout.println("Fitting to a Normal (Gaussian) distribution");
|
---|
3524 | fout.println("y = (yscale/(sd.sqrt(2.pi)).exp(0.5.square((x-mean)/sd))");
|
---|
3525 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3526 | paraName[0]="mean";
|
---|
3527 | paraName[1]="sd";
|
---|
3528 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3529 | this.nonLinearPrint(fout);
|
---|
3530 | break;
|
---|
3531 | case 5: fout.println("Fitting to a Lorentzian distribution");
|
---|
3532 | fout.println("y = (yscale/pi).(gamma/2)/((x-mean)^2+(gamma/2)^2)");
|
---|
3533 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3534 | paraName[0]="mean";
|
---|
3535 | paraName[1]="gamma";
|
---|
3536 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3537 | this.nonLinearPrint(fout);
|
---|
3538 | break;
|
---|
3539 | case 6: fout.println("Fitting to a Poisson distribution");
|
---|
3540 | fout.println("y = yscale.mu^k.exp(-mu)/mu!");
|
---|
3541 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3542 | paraName[0]="mean";
|
---|
3543 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3544 | this.nonLinearPrint(fout);
|
---|
3545 | break;
|
---|
3546 | case 7: fout.println("Fitting to a Two Parameter Minimum Order Statistic Gumbel [Type 1 Extreme Value] Distribution");
|
---|
3547 | fout.println("y = (yscale/sigma)*exp((x - mu)/sigma))*exp(-exp((x-mu)/sigma))");
|
---|
3548 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3549 | paraName[0]="mu";
|
---|
3550 | paraName[1]="sigma";
|
---|
3551 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3552 | this.nonLinearPrint(fout);
|
---|
3553 | break;
|
---|
3554 | case 8: fout.println("Fitting to a Two Parameter Maximum Order Statistic Gumbel [Type 1 Extreme Value] Distribution");
|
---|
3555 | fout.println("y = (yscale/sigma)*exp(-(x - mu)/sigma))*exp(-exp(-(x-mu)/sigma))");
|
---|
3556 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3557 | paraName[0]="mu";
|
---|
3558 | paraName[1]="sigma";
|
---|
3559 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3560 | this.nonLinearPrint(fout);
|
---|
3561 | break;
|
---|
3562 | case 9: fout.println("Fitting to a One Parameter Minimum Order Statistic Gumbel [Type 1 Extreme Value] Distribution");
|
---|
3563 | fout.println("y = (yscale)*exp(x/sigma))*exp(-exp(x/sigma))");
|
---|
3564 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3565 | paraName[0]="sigma";
|
---|
3566 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3567 | this.nonLinearPrint(fout);
|
---|
3568 | break;
|
---|
3569 | case 10: fout.println("Fitting to a One Parameter Maximum Order Statistic Gumbel [Type 1 Extreme Value] Distribution");
|
---|
3570 | fout.println("y = (yscale)*exp(-x/sigma))*exp(-exp(-x/sigma))");
|
---|
3571 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3572 | paraName[0]="sigma";
|
---|
3573 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3574 | this.nonLinearPrint(fout);
|
---|
3575 | break;
|
---|
3576 | case 11: fout.println("Fitting to a Standard Minimum Order Statistic Gumbel [Type 1 Extreme Value] Distribution");
|
---|
3577 | fout.println("y = (yscale)*exp(x))*exp(-exp(x))");
|
---|
3578 | fout.println("Linear regression used to fit y = yscale*z where z = exp(x))*exp(-exp(x)))");
|
---|
3579 | if(this.scaleFlag)paraName[0]="y scale";
|
---|
3580 | this.linearPrint(fout);
|
---|
3581 | break;
|
---|
3582 | case 12: fout.println("Fitting to a Standard Maximum Order Statistic Gumbel [Type 1 Extreme Value] Distribution");
|
---|
3583 | fout.println("y = (yscale)*exp(-x))*exp(-exp(-x))");
|
---|
3584 | fout.println("Linear regression used to fit y = yscale*z where z = exp(-x))*exp(-exp(-x)))");
|
---|
3585 | if(this.scaleFlag)paraName[0]="y scale";
|
---|
3586 | this.linearPrint(fout);
|
---|
3587 | break;
|
---|
3588 | case 13: fout.println("Fitting to a Three Parameter Frechet [Type 2 Extreme Value] Distribution");
|
---|
3589 | fout.println("y = yscale.(gamma/sigma)*((x - mu)/sigma)^(-gamma-1)*exp(-((x-mu)/sigma)^-gamma");
|
---|
3590 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3591 | paraName[0]="mu";
|
---|
3592 | paraName[1]="sigma";
|
---|
3593 | paraName[2]="gamma";
|
---|
3594 | if(this.scaleFlag)paraName[3]="y scale";
|
---|
3595 | this.nonLinearPrint(fout);
|
---|
3596 | break;
|
---|
3597 | case 14: fout.println("Fitting to a Two parameter Frechet [Type2 Extreme Value] Distribution");
|
---|
3598 | fout.println("y = yscale.(gamma/sigma)*(x/sigma)^(-gamma-1)*exp(-(x/sigma)^-gamma");
|
---|
3599 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3600 | paraName[0]="sigma";
|
---|
3601 | paraName[1]="gamma";
|
---|
3602 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3603 | this.nonLinearPrint(fout);
|
---|
3604 | break;
|
---|
3605 | case 15: fout.println("Fitting to a Standard Frechet [Type 2 Extreme Value] Distribution");
|
---|
3606 | fout.println("y = yscale.gamma*(x)^(-gamma-1)*exp(-(x)^-gamma");
|
---|
3607 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3608 | paraName[0]="gamma";
|
---|
3609 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3610 | this.nonLinearPrint(fout);
|
---|
3611 | break;
|
---|
3612 | case 16: fout.println("Fitting to a Three parameter Weibull [Type 3 Extreme Value] Distribution");
|
---|
3613 | fout.println("y = yscale.(gamma/sigma)*((x - mu)/sigma)^(gamma-1)*exp(-((x-mu)/sigma)^gamma");
|
---|
3614 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3615 | paraName[0]="mu";
|
---|
3616 | paraName[1]="sigma";
|
---|
3617 | paraName[2]="gamma";
|
---|
3618 | if(this.scaleFlag)paraName[3]="y scale";
|
---|
3619 | this.nonLinearPrint(fout);
|
---|
3620 | break;
|
---|
3621 | case 17: fout.println("Fitting to a Two parameter Weibull [Type 3 Extreme Value] Distribution");
|
---|
3622 | fout.println("y = yscale.(gamma/sigma)*(x/sigma)^(gamma-1)*exp(-(x/sigma)^gamma");
|
---|
3623 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3624 | paraName[0]="sigma";
|
---|
3625 | paraName[1]="gamma";
|
---|
3626 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3627 | this.nonLinearPrint(fout);
|
---|
3628 | break;
|
---|
3629 | case 18: fout.println("Fitting to a Standard Weibull [Type 3 Extreme Value] Distribution");
|
---|
3630 | fout.println("y = yscale.gamma*(x)^(gamma-1)*exp(-(x)^gamma");
|
---|
3631 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3632 | paraName[0]="gamma";
|
---|
3633 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3634 | this.nonLinearPrint(fout);
|
---|
3635 | break;
|
---|
3636 | case 19: fout.println("Fitting to a Two parameter Exponential Distribution");
|
---|
3637 | fout.println("y = (yscale/sigma)*exp(-(x-mu)/sigma)");
|
---|
3638 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3639 | paraName[0]="mu";
|
---|
3640 | paraName[1]="sigma";
|
---|
3641 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3642 | this.nonLinearPrint(fout);
|
---|
3643 | break;
|
---|
3644 | case 20: fout.println("Fitting to a One parameter Exponential Distribution");
|
---|
3645 | fout.println("y = (yscale/sigma)*exp(-x/sigma)");
|
---|
3646 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3647 | paraName[0]="sigma";
|
---|
3648 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3649 | this.nonLinearPrint(fout);
|
---|
3650 | break;
|
---|
3651 | case 21: fout.println("Fitting to a Standard Exponential Distribution");
|
---|
3652 | fout.println("y = yscale*exp(-x)");
|
---|
3653 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3654 | if(this.scaleFlag)paraName[0]="y scale";
|
---|
3655 | this.nonLinearPrint(fout);
|
---|
3656 | break;
|
---|
3657 | case 22: fout.println("Fitting to a Rayleigh Distribution");
|
---|
3658 | fout.println("y = (yscale/sigma)*(x/sigma)*exp(-0.5*(x/sigma)^2)");
|
---|
3659 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3660 | paraName[0]="sigma";
|
---|
3661 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3662 | this.nonLinearPrint(fout);
|
---|
3663 | break;
|
---|
3664 | case 23: fout.println("Fitting to a Two Parameter Pareto Distribution");
|
---|
3665 | fout.println("y = yscale*(alpha*beta^alpha)/(x^(alpha+1))");
|
---|
3666 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3667 | paraName[0]="alpha";
|
---|
3668 | paraName[1]="beta";
|
---|
3669 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3670 | this.nonLinearPrint(fout);
|
---|
3671 | break;
|
---|
3672 | case 24: fout.println("Fitting to a One Parameter Pareto Distribution");
|
---|
3673 | fout.println("y = yscale*(alpha)/(x^(alpha+1))");
|
---|
3674 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3675 | paraName[0]="alpha";
|
---|
3676 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3677 | this.nonLinearPrint(fout);
|
---|
3678 | break;
|
---|
3679 | case 25: fout.println("Fitting to a Sigmoidal Threshold Function");
|
---|
3680 | fout.println("y = yscale/(1 + exp(-slopeTerm(x - theta)))");
|
---|
3681 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3682 | paraName[0]="slope term";
|
---|
3683 | paraName[1]="theta";
|
---|
3684 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3685 | this.nonLinearPrint(fout);
|
---|
3686 | break;
|
---|
3687 | case 26: fout.println("Fitting to a Rectangular Hyperbola");
|
---|
3688 | fout.println("y = yscale.x/(theta + x)");
|
---|
3689 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3690 | paraName[0]="theta";
|
---|
3691 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3692 | this.nonLinearPrint(fout);
|
---|
3693 | break;
|
---|
3694 | case 27: fout.println("Fitting to a Scaled Heaviside Step Function");
|
---|
3695 | fout.println("y = yscale.H(x - theta)");
|
---|
3696 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3697 | paraName[0]="theta";
|
---|
3698 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3699 | this.nonLinearPrint(fout);
|
---|
3700 | break;
|
---|
3701 | case 28: fout.println("Fitting to a Hill/Sips Sigmoid");
|
---|
3702 | fout.println("y = yscale.x^n/(theta^n + x^n)");
|
---|
3703 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3704 | paraName[0]="theta";
|
---|
3705 | paraName[1]="n";
|
---|
3706 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3707 | this.nonLinearPrint(fout);
|
---|
3708 | break;
|
---|
3709 | case 29: fout.println("Fitting to a Shifted Pareto Distribution");
|
---|
3710 | fout.println("y = yscale*(alpha*beta^alpha)/((x-theta)^(alpha+1))");
|
---|
3711 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3712 | paraName[0]="alpha";
|
---|
3713 | paraName[1]="beta";
|
---|
3714 | paraName[2]="theta";
|
---|
3715 | if(this.scaleFlag)paraName[3]="y scale";
|
---|
3716 | this.nonLinearPrint(fout);
|
---|
3717 | break;
|
---|
3718 | case 30: fout.println("Fitting to a Logistic distribution");
|
---|
3719 | fout.println("y = yscale*exp(-(x-mu)/beta)/(beta*(1 + exp(-(x-mu)/beta))^2");
|
---|
3720 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3721 | paraName[0]="mu";
|
---|
3722 | paraName[1]="beta";
|
---|
3723 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3724 | this.nonLinearPrint(fout);
|
---|
3725 | break;
|
---|
3726 | case 31: fout.println("Fitting to a Beta distribution - [0, 1] interval");
|
---|
3727 | fout.println("y = yscale*x^(alpha-1)*(1-x)^(beta-1)/B(alpha, beta)");
|
---|
3728 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3729 | paraName[0]="alpha";
|
---|
3730 | paraName[1]="beta";
|
---|
3731 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3732 | this.nonLinearPrint(fout);
|
---|
3733 | break;
|
---|
3734 | case 32: fout.println("Fitting to a Beta distribution - [min, max] interval");
|
---|
3735 | fout.println("y = yscale*(x-min)^(alpha-1)*(max-x)^(beta-1)/(B(alpha, beta)*(max-min)^(alpha+beta-1)");
|
---|
3736 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3737 | paraName[0]="alpha";
|
---|
3738 | paraName[1]="beta";
|
---|
3739 | paraName[2]="min";
|
---|
3740 | paraName[3]="max";
|
---|
3741 | if(this.scaleFlag)paraName[4]="y scale";
|
---|
3742 | this.nonLinearPrint(fout);
|
---|
3743 | break;
|
---|
3744 | case 33: fout.println("Fitting to a Three Parameter Gamma distribution");
|
---|
3745 | fout.println("y = yscale*((x-mu)/beta)^(gamma-1)*exp(-(x-mu)/beta)/(beta*Gamma(gamma))");
|
---|
3746 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3747 | paraName[0]="mu";
|
---|
3748 | paraName[1]="beta";
|
---|
3749 | paraName[2]="gamma";
|
---|
3750 | if(this.scaleFlag)paraName[3]="y scale";
|
---|
3751 | this.nonLinearPrint(fout);
|
---|
3752 | break;
|
---|
3753 | case 34: fout.println("Fitting to a Standard Gamma distribution");
|
---|
3754 | fout.println("y = yscale*x^(gamma-1)*exp(-x)/Gamma(gamma)");
|
---|
3755 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3756 | paraName[0]="gamma";
|
---|
3757 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3758 | this.nonLinearPrint(fout);
|
---|
3759 | break;
|
---|
3760 | case 35: fout.println("Fitting to an Erang distribution");
|
---|
3761 | fout.println("y = yscale*lambda^k*x^(k-1)*exp(-x*lambda)/(k-1)!");
|
---|
3762 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3763 | paraName[0]="lambda";
|
---|
3764 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3765 | this.nonLinearPrint(fout);
|
---|
3766 | break;
|
---|
3767 | case 36: fout.println("Fitting to a two parameter log-normal distribution");
|
---|
3768 | fout.println("y = (yscale/(x.sigma.sqrt(2.pi)).exp(0.5.square((log(x)-muu)/sigma))");
|
---|
3769 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3770 | paraName[0]="mu";
|
---|
3771 | paraName[1]="sigma";
|
---|
3772 | if(this.scaleFlag)paraName[2]="y scale";
|
---|
3773 | this.nonLinearPrint(fout);
|
---|
3774 | break;
|
---|
3775 | case 37: fout.println("Fitting to a three parameter log-normal distribution");
|
---|
3776 | fout.println("y = (yscale/((x-alpha).beta.sqrt(2.pi)).exp(0.5.square((log(x-alpha)/gamma)/beta))");
|
---|
3777 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3778 | paraName[0]="alpha";
|
---|
3779 | paraName[1]="beta";
|
---|
3780 | paraName[2]="gamma";
|
---|
3781 | if(this.scaleFlag)paraName[3]="y scale";
|
---|
3782 | this.nonLinearPrint(fout);
|
---|
3783 | break;
|
---|
3784 | case 38: fout.println("Fitting to a Normal (Gaussian) distribution with fixed parameters");
|
---|
3785 | fout.println("y = (yscale/(sd.sqrt(2.pi)).exp(0.5.square((x-mean)/sd))");
|
---|
3786 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3787 | paraName[0]="mean";
|
---|
3788 | paraName[1]="sd";
|
---|
3789 | paraName[2]="y scale";
|
---|
3790 | this.nonLinearPrint(fout);
|
---|
3791 | break;
|
---|
3792 | case 39: fout.println("Fitting to a EC50 dose response curve");
|
---|
3793 | fout.println("y = bottom + (top - bottom)/(1 + (x/EC50)^HillSlope)");
|
---|
3794 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3795 | paraName[0]="bottom";
|
---|
3796 | paraName[1]="top";
|
---|
3797 | paraName[2]="EC50";
|
---|
3798 | paraName[3]="Hill Slope";
|
---|
3799 | this.nonLinearPrint(fout);
|
---|
3800 | break;
|
---|
3801 | case 40: fout.println("Fitting to a LogEC50 dose response curve");
|
---|
3802 | fout.println("y = bottom + (top - bottom)/(1 + 10^((logEC50 - x).HillSlope))");
|
---|
3803 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3804 | paraName[0]="bottom";
|
---|
3805 | paraName[1]="top";
|
---|
3806 | paraName[2]="LogEC50";
|
---|
3807 | paraName[3]="Hill Slope";
|
---|
3808 | this.nonLinearPrint(fout);
|
---|
3809 | break;
|
---|
3810 | case 41: fout.println("Fitting to a EC50 dose response curve - bottom constrained to be zero or positive");
|
---|
3811 | fout.println("y = bottom + (top - bottom)/(1 + (x/EC50)^HillSlope)");
|
---|
3812 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3813 | paraName[0]="bottom";
|
---|
3814 | paraName[1]="top";
|
---|
3815 | paraName[2]="EC50";
|
---|
3816 | paraName[3]="Hill Slope";
|
---|
3817 | this.nonLinearPrint(fout);
|
---|
3818 | break;
|
---|
3819 | case 42: fout.println("Fitting to a LogEC50 dose response curve - bottom constrained to be zero or positive");
|
---|
3820 | fout.println("y = bottom + (top - bottom)/(1 + 10^((logEC50 - x).HillSlope))");
|
---|
3821 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3822 | paraName[0]="bottom";
|
---|
3823 | paraName[1]="top";
|
---|
3824 | paraName[2]="LogEC50";
|
---|
3825 | paraName[3]="Hill Slope";
|
---|
3826 | this.nonLinearPrint(fout);
|
---|
3827 | break;
|
---|
3828 | case 43: fout.println("Fitting to an exponential");
|
---|
3829 | fout.println("y = yscale.exp(A.x)");
|
---|
3830 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3831 | paraName[0]="A";
|
---|
3832 | if(this.scaleFlag)paraName[1]="y scale";
|
---|
3833 | this.nonLinearPrint(fout);
|
---|
3834 | break;
|
---|
3835 | case 44: fout.println("Fitting to multiple exponentials");
|
---|
3836 | fout.println("y = Sum[Ai.exp(Bi.x)], i=1 to n");
|
---|
3837 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3838 | for(int i=0;i<this.nTerms;i+=2){
|
---|
3839 | this.paraName[i]="A["+(i+1)+"]";
|
---|
3840 | this.paraName[i+1]="B["+(i+1)+"]";
|
---|
3841 | }
|
---|
3842 | this.nonLinearPrint(fout);
|
---|
3843 | break;
|
---|
3844 | case 45: fout.println("Fitting to one minus an exponential");
|
---|
3845 | fout.println("y = A(1 - exp(B.x)");
|
---|
3846 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3847 | paraName[0]="A";
|
---|
3848 | paraName[1]="B";
|
---|
3849 | this.nonLinearPrint(fout);
|
---|
3850 | break;
|
---|
3851 | case 46: fout.println("Fitting to a constant");
|
---|
3852 | fout.println("y = a");
|
---|
3853 | fout.println("Stat weighted mean used to fit the data");
|
---|
3854 | paraName[0]="a";
|
---|
3855 | this.linearPrint(fout);
|
---|
3856 | break;
|
---|
3857 | case 47: fout.println("Linear Regression with fixed intercept");
|
---|
3858 | fout.println("y = fixed intercept + c[0]*x1 + c[1]*x2 +c[2]*x3 + . . . ");
|
---|
3859 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3860 | this.linearPrint(fout);
|
---|
3861 | break;
|
---|
3862 | case 48: fout.println("Polynomial (with degree = " + nTerms + ") and fixed intercept, Fitting: Linear Regression");
|
---|
3863 | fout.println("y = fixed intercept + c[0]*x + c[1]*x^2 +c[2]*x^3 + . . .");
|
---|
3864 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3865 | this.linearPrint(fout);
|
---|
3866 | break;
|
---|
3867 | case 49: fout.println("Fitting multiple Gaussian distributions");
|
---|
3868 | fout.println("y = Sum(A[i]/(sd[i].sqrt(2.pi)).exp(0.5.square((x-mean[i])/sd[i])) = yscale.Sum(f[i]/(sd[i].sqrt(2.pi)).exp(0.5.square((x-mean[i])/sd[i]))");
|
---|
3869 | fout.println("Nelder and Mead Simplex used to fit the data");
|
---|
3870 | for(int i=0; i<this.nGaussians; i++){
|
---|
3871 | paraName[3*i]="mean[" + i + "]";
|
---|
3872 | paraName[3*i+1]="sd[" + i + "]";
|
---|
3873 | paraName[3*i+2]="A[" + i + "]";
|
---|
3874 | }
|
---|
3875 | if(this.scaleFlag)paraName[3*this.nGaussians]="y scale";
|
---|
3876 | this.nonLinearPrint(fout);
|
---|
3877 | break;
|
---|
3878 | case 50: fout.println("Fitting to a non-integer polynomial");
|
---|
3879 | fout.println("y = c[0] + c[1]*x + c[2]*x^c[3]");
|
---|
3880 | for(int i=0;i<this.nTerms;i++)this.paraName[i]="c["+i+"]";
|
---|
3881 | this.nonLinearPrint(fout);
|
---|
3882 | break;
|
---|
3883 | default: throw new IllegalArgumentException("Method number (this.lastMethod) not found");
|
---|
3884 |
|
---|
3885 | }
|
---|
3886 |
|
---|
3887 | fout.close();
|
---|
3888 | }
|
---|
3889 |
|
---|
3890 | // Print the results of the regression
|
---|
3891 | // No file name provided
|
---|
3892 | public void print(){
|
---|
3893 | String filename="RegressOutput.txt";
|
---|
3894 | this.print(filename);
|
---|
3895 | }
|
---|
3896 |
|
---|
3897 | // protected method - print linear regression output
|
---|
3898 | protected void linearPrint(FileOutput fout){
|
---|
3899 |
|
---|
3900 | if(this.legendCheck){
|
---|
3901 | fout.println();
|
---|
3902 | fout.println("x1 = " + this.xLegend);
|
---|
3903 | fout.println("y = " + this.yLegend);
|
---|
3904 | }
|
---|
3905 |
|
---|
3906 | fout.println();
|
---|
3907 | if(this.lastMethod==47)fout.println("Fixed Intercept = " + this.fixedInterceptL);
|
---|
3908 | if(this.lastMethod==48)fout.println("Fixed Intercept = " + this.fixedInterceptP);
|
---|
3909 | fout.printtab(" ", this.field);
|
---|
3910 | fout.printtab("Best", this.field);
|
---|
3911 | fout.printtab("Error", this.field);
|
---|
3912 | fout.printtab("Coefficient of", this.field);
|
---|
3913 | fout.printtab("t-value ", this.field);
|
---|
3914 | fout.println("p-value");
|
---|
3915 |
|
---|
3916 | fout.printtab(" ", this.field);
|
---|
3917 | fout.printtab("Estimate", this.field);
|
---|
3918 | fout.printtab(" ", this.field);
|
---|
3919 | fout.printtab("variation (%)", this.field);
|
---|
3920 | fout.printtab("t ", this.field);
|
---|
3921 | fout.println("P > |t|");
|
---|
3922 |
|
---|
3923 | for(int i=0; i<this.nTerms; i++){
|
---|
3924 | fout.printtab(this.paraName[i], this.field);
|
---|
3925 | fout.printtab(Fmath.truncate(best[i],this.prec), this.field);
|
---|
3926 | fout.printtab(Fmath.truncate(bestSd[i],this.prec), this.field);
|
---|
3927 | fout.printtab(Fmath.truncate(Math.abs(bestSd[i]*100.0D/best[i]),this.prec), this.field);
|
---|
3928 | fout.printtab(Fmath.truncate(tValues[i],this.prec), this.field);
|
---|
3929 | fout.println(Fmath.truncate((pValues[i]),this.prec));
|
---|
3930 | }
|
---|
3931 | fout.println();
|
---|
3932 |
|
---|
3933 | int ii=0;
|
---|
3934 | if(this.lastMethod<2)ii=1;
|
---|
3935 | for(int i=0; i<this.nXarrays; i++){
|
---|
3936 | fout.printtab("x"+String.valueOf(i+ii), this.field);
|
---|
3937 | }
|
---|
3938 | fout.printtab("y(expl)", this.field);
|
---|
3939 | fout.printtab("y(calc)", this.field);
|
---|
3940 | fout.printtab("weight", this.field);
|
---|
3941 | fout.printtab("residual", this.field);
|
---|
3942 | fout.println("residual");
|
---|
3943 |
|
---|
3944 | for(int i=0; i<this.nXarrays; i++){
|
---|
3945 | fout.printtab(" ", this.field);
|
---|
3946 | }
|
---|
3947 | fout.printtab(" ", this.field);
|
---|
3948 | fout.printtab(" ", this.field);
|
---|
3949 | fout.printtab(" ", this.field);
|
---|
3950 | fout.printtab("(unweighted)", this.field);
|
---|
3951 | fout.println("(weighted)");
|
---|
3952 |
|
---|
3953 |
|
---|
3954 | for(int i=0; i<this.nData; i++){
|
---|
3955 | for(int j=0; j<this.nXarrays; j++){
|
---|
3956 | fout.printtab(Fmath.truncate(this.xData[j][i],this.prec), this.field);
|
---|
3957 | }
|
---|
3958 | fout.printtab(Fmath.truncate(this.yData[i],this.prec), this.field);
|
---|
3959 | fout.printtab(Fmath.truncate(this.yCalc[i],this.prec), this.field);
|
---|
3960 | fout.printtab(Fmath.truncate(this.weight[i],this.prec), this.field);
|
---|
3961 | fout.printtab(Fmath.truncate(this.residual[i],this.prec), this.field);
|
---|
3962 | fout.println(Fmath.truncate(this.residualW[i],this.prec));
|
---|
3963 | }
|
---|
3964 | fout.println();
|
---|
3965 | fout.println("Sum of squares " + Fmath.truncate(this.sumOfSquaresError, this.prec));
|
---|
3966 | if(this.trueFreq){
|
---|
3967 | fout.printtab("Chi Square (Poissonian bins)");
|
---|
3968 | fout.println(Fmath.truncate(this.chiSquare,this.prec));
|
---|
3969 | fout.printtab("Reduced Chi Square (Poissonian bins)");
|
---|
3970 | fout.println(Fmath.truncate(this.reducedChiSquare,this.prec));
|
---|
3971 | fout.printtab("Chi Square (Poissonian bins) Probability");
|
---|
3972 | fout.println(Fmath.truncate((1.0D-Stat.chiSquareProb(this.chiSquare, this.nData-this.nXarrays)),this.prec));
|
---|
3973 | }
|
---|
3974 | else{
|
---|
3975 | if(weightOpt){
|
---|
3976 | fout.printtab("Chi Square");
|
---|
3977 | fout.println(Fmath.truncate(this.chiSquare,this.prec));
|
---|
3978 | fout.printtab("Reduced Chi Square");
|
---|
3979 | fout.println(Fmath.truncate(this.reducedChiSquare,this.prec));
|
---|
3980 | }
|
---|
3981 | }
|
---|
3982 | fout.println(" ");
|
---|
3983 | if(this.lastMethod!=46){
|
---|
3984 | if(this.nXarrays==1 && this.nYarrays==1 && this.lastMethod!=47 && this.lastMethod!=48){
|
---|
3985 | fout.println("Correlation: x - y data");
|
---|
3986 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient (R)");
|
---|
3987 | fout.println(Fmath.truncate(this.xyR,this.prec));
|
---|
3988 | if(this.xyR<=1.0D){
|
---|
3989 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient Probability");
|
---|
3990 | fout.println(Fmath.truncate(Stat.linearCorrCoeffProb(this.xyR, this.nData-2),this.prec));
|
---|
3991 | }
|
---|
3992 | }
|
---|
3993 |
|
---|
3994 | fout.println(" ");
|
---|
3995 | fout.println("Correlation: y(experimental) - y(calculated)");
|
---|
3996 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient");
|
---|
3997 | fout.println(Fmath.truncate(this.yyR, this.prec));
|
---|
3998 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient Probability");
|
---|
3999 | fout.println(Fmath.truncate(Stat.linearCorrCoeffProb(this.yyR, this.nData-2),this.prec));
|
---|
4000 |
|
---|
4001 | fout.println();
|
---|
4002 | if(this.chiSquare!=0.0D){
|
---|
4003 | fout.println("Correlation coefficients");
|
---|
4004 | fout.printtab(" ", this.field);
|
---|
4005 | for(int i=0; i<this.nTerms;i++){
|
---|
4006 | fout.printtab(paraName[i], this.field);
|
---|
4007 | }
|
---|
4008 | fout.println();
|
---|
4009 |
|
---|
4010 | for(int j=0; j<this.nTerms;j++){
|
---|
4011 | fout.printtab(paraName[j], this.field);
|
---|
4012 | for(int i=0; i<this.nTerms;i++){
|
---|
4013 | fout.printtab(Fmath.truncate(this.corrCoeff[i][j], this.prec), this.field);
|
---|
4014 | }
|
---|
4015 | fout.println();
|
---|
4016 | }
|
---|
4017 | }
|
---|
4018 | }
|
---|
4019 |
|
---|
4020 | fout.println(" ");
|
---|
4021 | fout.printtab("Degrees of freedom");
|
---|
4022 | fout.println(this.nData - this.nTerms);
|
---|
4023 | fout.printtab("Number of data points");
|
---|
4024 | fout.println(this.nData);
|
---|
4025 | fout.printtab("Number of estimated paramaters");
|
---|
4026 | fout.println(this.nTerms);
|
---|
4027 | fout.println();
|
---|
4028 |
|
---|
4029 | if(this.bestPolyFlag){
|
---|
4030 |
|
---|
4031 | fout.println("Method bestPolynomial search history");
|
---|
4032 | fout.println("F-probability significance level (%): " + this.fProbSignificance*100.0);
|
---|
4033 | fout.println("Degree of best fit polynomial " + this.bestPolynomialDegree);
|
---|
4034 | fout.println(" ");
|
---|
4035 | fout.print("Polynomial degree", 2*field);
|
---|
4036 | fout.print("chi square", 2*field);
|
---|
4037 | fout.print("F-ratio", field);
|
---|
4038 | fout.println("F-probability");
|
---|
4039 | fout.print("comparison", 2*field);
|
---|
4040 | fout.print("comparison", 2*field);
|
---|
4041 | fout.print(" ", field);
|
---|
4042 | fout.println(" ");
|
---|
4043 |
|
---|
4044 | int nAttempts = (Integer)this.bestPolyArray.get(1);
|
---|
4045 | System.out.println((Integer)this.bestPolyArray.get(0) + " " + nAttempts);
|
---|
4046 | int[] deg0s = (int[])this.bestPolyArray.get(2);
|
---|
4047 | int[] deg1s = (int[])this.bestPolyArray.get(3);
|
---|
4048 | double[] chi0s = (double[])this.bestPolyArray.get(4);
|
---|
4049 | double[] chi1s = (double[])this.bestPolyArray.get(5);
|
---|
4050 | double[] fRatios = (double[])this.bestPolyArray.get(6);
|
---|
4051 | double[] fProbs = (double[])this.bestPolyArray.get(7);
|
---|
4052 |
|
---|
4053 | for(int i=0; i<nAttempts; i++){
|
---|
4054 | fout.print(deg0s[i], field);
|
---|
4055 | fout.print(deg1s[i], field);
|
---|
4056 | fout.print(Fmath.truncate(chi0s[i], this.prec), field);
|
---|
4057 | fout.print(Fmath.truncate(chi1s[i], this.prec), field);
|
---|
4058 | fout.print(Fmath.truncate(fRatios[i], this.prec), field);
|
---|
4059 | fout.println(Fmath.truncate(fProbs[i], this.prec));
|
---|
4060 | }
|
---|
4061 |
|
---|
4062 | }
|
---|
4063 |
|
---|
4064 | fout.println();
|
---|
4065 | fout.println("Coefficient of determination, = " + Fmath.truncate(this.multR, this.prec));
|
---|
4066 | fout.println("Adjusted Coefficient of determination, = " + Fmath.truncate(this.adjustedR, this.prec));
|
---|
4067 | fout.println("Coefficient of determination, F-ratio = " + Fmath.truncate(this.multipleF, this.prec));
|
---|
4068 | fout.println("Coefficient of determination, F-ratio probability = " + Fmath.truncate(this.multipleFprob, this.prec));
|
---|
4069 | fout.println("Total (weighted) sum of squares = " + Fmath.truncate(this.sumOfSquaresTotal, this.prec));
|
---|
4070 | fout.println("Regression (weighted) sum of squares = " + Fmath.truncate(this.sumOfSquaresRegrn, this.prec));
|
---|
4071 | fout.println("Error (weighted) sum of squares = " + Fmath.truncate(this.chiSquare, this.prec));
|
---|
4072 |
|
---|
4073 | fout.println();
|
---|
4074 | fout.println("End of file");
|
---|
4075 |
|
---|
4076 | fout.close();
|
---|
4077 | }
|
---|
4078 |
|
---|
4079 |
|
---|
4080 | // protected method - print non-linear regression output
|
---|
4081 | protected void nonLinearPrint(FileOutput fout){
|
---|
4082 | if(this.userSupplied){
|
---|
4083 | fout.println();
|
---|
4084 | fout.println("Initial estimates were supplied by the user");
|
---|
4085 | }
|
---|
4086 | else{
|
---|
4087 | fout.println("Initial estimates were calculated internally");
|
---|
4088 | }
|
---|
4089 |
|
---|
4090 | switch(this.scaleOpt){
|
---|
4091 | case 1: fout.println();
|
---|
4092 | fout.println("Initial estimates were scaled to unity within the regression");
|
---|
4093 | break;
|
---|
4094 | case 2: fout.println();
|
---|
4095 | fout.println("Initial estimates were scaled with user supplied scaling factors within the regression");
|
---|
4096 | break;
|
---|
4097 | }
|
---|
4098 |
|
---|
4099 | if(this.legendCheck){
|
---|
4100 | fout.println();
|
---|
4101 | fout.println("x1 = " + this.xLegend);
|
---|
4102 | fout.println("y = " + this.yLegend);
|
---|
4103 | }
|
---|
4104 |
|
---|
4105 | fout.println();
|
---|
4106 | if(!this.nlrStatus){
|
---|
4107 | fout.println("Convergence criterion was not satisfied");
|
---|
4108 | fout.println("The following results are, or are derived from, the current estimates on exiting the regression method");
|
---|
4109 | fout.println();
|
---|
4110 | }
|
---|
4111 |
|
---|
4112 | fout.println("Estimated parameters");
|
---|
4113 | fout.println("The statistics are obtained assuming that the model behaves as a linear model about the minimum.");
|
---|
4114 | fout.println("The Hessian matrix is calculated as the numerically derived second derivatives of chi square with respect to all pairs of parameters.");
|
---|
4115 | if(this.zeroCheck)fout.println("The best estimate/s equal to zero were replaced by the step size in the numerical differentiation!!!");
|
---|
4116 | fout.println("Consequentlty treat the statistics with great caution");
|
---|
4117 | if(!this.posVarFlag){
|
---|
4118 | fout.println("Covariance matrix contains at least one negative diagonal element");
|
---|
4119 | fout.println(" - all variances are dubious");
|
---|
4120 | fout.println(" - may not be at a minimum or the model may be so non-linear that the linear approximation in calculating the statisics is invalid");
|
---|
4121 | }
|
---|
4122 | if(!this.invertFlag){
|
---|
4123 | fout.println("Hessian matrix is singular");
|
---|
4124 | fout.println(" - variances cannot be calculated");
|
---|
4125 | fout.println(" - may not be at a minimum or the model may be so non-linear that the linear approximation in calculating the statisics is invalid");
|
---|
4126 | }
|
---|
4127 |
|
---|
4128 | fout.println(" ");
|
---|
4129 | if(!this.scaleFlag){
|
---|
4130 | fout.println("The ordinate scaling factor [yscale, Ao] has been set equal to " + this.yScaleFactor);
|
---|
4131 | fout.println(" ");
|
---|
4132 | }
|
---|
4133 | if(lastMethod==35){
|
---|
4134 | fout.println("The integer rate parameter, k, was varied in unit steps to obtain a minimum sum of squares");
|
---|
4135 | fout.println("This value of k was " + this.kayValue);
|
---|
4136 | fout.println(" ");
|
---|
4137 | }
|
---|
4138 |
|
---|
4139 | fout.printtab(" ", this.field);
|
---|
4140 | if(this.invertFlag){
|
---|
4141 | fout.printtab("Best", this.field);
|
---|
4142 | fout.printtab("Estimate of", this.field);
|
---|
4143 | fout.printtab("Coefficient", this.field);
|
---|
4144 | fout.printtab("t-value", this.field);
|
---|
4145 | fout.println("p-value");
|
---|
4146 | }
|
---|
4147 | else{
|
---|
4148 | fout.println("Best");
|
---|
4149 | }
|
---|
4150 |
|
---|
4151 | if(this.invertFlag){
|
---|
4152 | fout.printtab(" ", this.field);
|
---|
4153 | fout.printtab("estimate", this.field);
|
---|
4154 | fout.printtab("the error", this.field);
|
---|
4155 | fout.printtab("of", this.field);
|
---|
4156 | fout.printtab("t", this.field);
|
---|
4157 | fout.println("P > |t|");
|
---|
4158 | }
|
---|
4159 | else{
|
---|
4160 | fout.printtab(" ", this.field);
|
---|
4161 | fout.println("estimate");
|
---|
4162 | }
|
---|
4163 |
|
---|
4164 | if(this.invertFlag){
|
---|
4165 | fout.printtab(" ", this.field);
|
---|
4166 | fout.printtab(" ", this.field);
|
---|
4167 | fout.printtab(" ", this.field);
|
---|
4168 | fout.println("variation (%)");
|
---|
4169 | }
|
---|
4170 | else{
|
---|
4171 | fout.println(" ");
|
---|
4172 | }
|
---|
4173 |
|
---|
4174 | if(this.lastMethod==38){
|
---|
4175 | int nT = 3;
|
---|
4176 | int ii = 0;
|
---|
4177 | for(int i=0; i<nT; i++){
|
---|
4178 | fout.printtab(this.paraName[i], this.field);
|
---|
4179 | if(this.fixed[i]){
|
---|
4180 | fout.printtab(this.values[i]);
|
---|
4181 | fout.println(" fixed parameter");
|
---|
4182 | }
|
---|
4183 | else{
|
---|
4184 | if(this.invertFlag){
|
---|
4185 | fout.printtab(Fmath.truncate(best[ii],this.prec), this.field);
|
---|
4186 | fout.printtab(Fmath.truncate(bestSd[ii],this.prec), this.field);
|
---|
4187 | fout.printtab(Fmath.truncate(Math.abs(bestSd[ii]*100.0D/best[ii]),this.prec), this.field);
|
---|
4188 | fout.printtab(Fmath.truncate(tValues[ii],this.prec), this.field);
|
---|
4189 | fout.println(Fmath.truncate(pValues[ii],this.prec));
|
---|
4190 | }
|
---|
4191 | else{
|
---|
4192 | fout.println(Fmath.truncate(best[ii],this.prec));
|
---|
4193 | }
|
---|
4194 | ii++;
|
---|
4195 | }
|
---|
4196 | }
|
---|
4197 | }
|
---|
4198 | else{
|
---|
4199 | for(int i=0; i<this.nTerms; i++){
|
---|
4200 | if(this.invertFlag){
|
---|
4201 | fout.printtab(this.paraName[i], this.field);
|
---|
4202 | fout.printtab(Fmath.truncate(best[i],this.prec), this.field);
|
---|
4203 | fout.printtab(Fmath.truncate(bestSd[i],this.prec), this.field);
|
---|
4204 | fout.printtab(Fmath.truncate(Math.abs(bestSd[i]*100.0D/best[i]),this.prec), this.field);
|
---|
4205 | fout.printtab(Fmath.truncate(tValues[i],this.prec), this.field);
|
---|
4206 | fout.println(Fmath.truncate(pValues[i],this.prec));
|
---|
4207 | }
|
---|
4208 | else{
|
---|
4209 | fout.printtab(this.paraName[i], this.field);
|
---|
4210 | fout.println(Fmath.truncate(best[i],this.prec));
|
---|
4211 | }
|
---|
4212 | }
|
---|
4213 | }
|
---|
4214 | fout.println();
|
---|
4215 |
|
---|
4216 | fout.printtab(" ", this.field);
|
---|
4217 | fout.printtab("Best", this.field);
|
---|
4218 | fout.printtab("Pre-min", this.field);
|
---|
4219 | fout.printtab("Post-min", this.field);
|
---|
4220 | fout.printtab("Initial", this.field);
|
---|
4221 | fout.printtab("Fractional", this.field);
|
---|
4222 | fout.println("Scaling");
|
---|
4223 |
|
---|
4224 | fout.printtab(" ", this.field);
|
---|
4225 | fout.printtab("estimate", this.field);
|
---|
4226 | fout.printtab("gradient", this.field);
|
---|
4227 | fout.printtab("gradient", this.field);
|
---|
4228 | fout.printtab("estimate", this.field);
|
---|
4229 | fout.printtab("step", this.field);
|
---|
4230 | fout.println("factor");
|
---|
4231 |
|
---|
4232 |
|
---|
4233 | if(this.lastMethod==38){
|
---|
4234 | int nT = 3;
|
---|
4235 | int ii = 0;
|
---|
4236 | for(int i=0; i<nT; i++){
|
---|
4237 | fout.printtab(this.paraName[i], this.field);
|
---|
4238 | if(this.fixed[i]){
|
---|
4239 | fout.printtab(this.values[i]);
|
---|
4240 | fout.println(" fixed parameter");
|
---|
4241 | }
|
---|
4242 | else{
|
---|
4243 | fout.printtab(Fmath.truncate(best[ii],this.prec), this.field);
|
---|
4244 | fout.printtab(Fmath.truncate(this.grad[ii][0],this.prec), this.field);
|
---|
4245 | fout.printtab(Fmath.truncate(this.grad[ii][1],this.prec), this.field);
|
---|
4246 | fout.printtab(Fmath.truncate(this.startH[ii],this.prec), this.field);
|
---|
4247 | fout.printtab(Fmath.truncate(this.stepH[ii],this.prec), this.field);
|
---|
4248 | fout.println(Fmath.truncate(this.scale[ii],this.prec));
|
---|
4249 | ii++;
|
---|
4250 | }
|
---|
4251 | }
|
---|
4252 | }
|
---|
4253 | else{
|
---|
4254 | for(int i=0; i<this.nTerms; i++){
|
---|
4255 | fout.printtab(this.paraName[i], this.field);
|
---|
4256 | fout.printtab(Fmath.truncate(best[i],this.prec), this.field);
|
---|
4257 | fout.printtab(Fmath.truncate(this.grad[i][0],this.prec), this.field);
|
---|
4258 | fout.printtab(Fmath.truncate(this.grad[i][1],this.prec), this.field);
|
---|
4259 | fout.printtab(Fmath.truncate(this.startH[i],this.prec), this.field);
|
---|
4260 | fout.printtab(Fmath.truncate(this.stepH[i],this.prec), this.field);
|
---|
4261 | fout.println(Fmath.truncate(this.scale[i],this.prec));
|
---|
4262 | }
|
---|
4263 | }
|
---|
4264 | fout.println();
|
---|
4265 |
|
---|
4266 |
|
---|
4267 |
|
---|
4268 | ErrorProp ePeak = null;
|
---|
4269 | ErrorProp eYscale = null;
|
---|
4270 | if(this.scaleFlag){
|
---|
4271 | switch(this.lastMethod){
|
---|
4272 | case 4: ErrorProp eSigma = new ErrorProp(best[1], bestSd[1]);
|
---|
4273 | eYscale = new ErrorProp(best[2]/Math.sqrt(2.0D*Math.PI), bestSd[2]/Math.sqrt(2.0D*Math.PI));
|
---|
4274 | ePeak = eYscale.over(eSigma);
|
---|
4275 | fout.printsp("Calculated estimate of the peak value = ");
|
---|
4276 | fout.println(ErrorProp.truncate(ePeak, prec));
|
---|
4277 | break;
|
---|
4278 | case 5: ErrorProp eGamma = new ErrorProp(best[1], bestSd[1]);
|
---|
4279 | eYscale = new ErrorProp(2.0D*best[2]/Math.PI, 2.0D*bestSd[2]/Math.PI);
|
---|
4280 | ePeak = eYscale.over(eGamma);
|
---|
4281 | fout.printsp("Calculated estimate of the peak value = ");
|
---|
4282 | fout.println(ErrorProp.truncate(ePeak, prec));
|
---|
4283 | break;
|
---|
4284 |
|
---|
4285 | }
|
---|
4286 | }
|
---|
4287 | if(this.lastMethod==25){
|
---|
4288 | fout.printsp("Calculated estimate of the maximum gradient = ");
|
---|
4289 | if(this.scaleFlag){
|
---|
4290 | fout.println(Fmath.truncate(best[0]*best[2]/4.0D, prec));
|
---|
4291 | }
|
---|
4292 | else{
|
---|
4293 | fout.println(Fmath.truncate(best[0]*this.yScaleFactor/4.0D, prec));
|
---|
4294 | }
|
---|
4295 |
|
---|
4296 | }
|
---|
4297 | if(this.lastMethod==28){
|
---|
4298 | fout.printsp("Calculated estimate of the maximum gradient = ");
|
---|
4299 | if(this.scaleFlag){
|
---|
4300 | fout.println(Fmath.truncate(best[1]*best[2]/(4.0D*best[0]), prec));
|
---|
4301 | }
|
---|
4302 | else{
|
---|
4303 | fout.println(Fmath.truncate(best[1]*this.yScaleFactor/(4.0D*best[0]), prec));
|
---|
4304 | }
|
---|
4305 | fout.printsp("Calculated estimate of the Ka, i.e. theta raised to the power n = ");
|
---|
4306 | fout.println(Fmath.truncate(Math.pow(best[0], best[1]), prec));
|
---|
4307 | }
|
---|
4308 | fout.println();
|
---|
4309 |
|
---|
4310 | if(this.lastMethod==49){
|
---|
4311 | fout.println("A[i] values converted to fractional contributions, f[i], and a scaling factor, yscale");
|
---|
4312 | fout.printtab(" ", this.field);
|
---|
4313 | if(this.invertFlag){
|
---|
4314 | fout.printtab("Best", this.field);
|
---|
4315 | fout.printtab("Estimate of", this.field);
|
---|
4316 | fout.printtab("Coefficient", this.field);
|
---|
4317 | fout.printtab("t-value", this.field);
|
---|
4318 | fout.println("p-value");
|
---|
4319 | }
|
---|
4320 | else{
|
---|
4321 | fout.println("Best");
|
---|
4322 | }
|
---|
4323 |
|
---|
4324 | if(this.invertFlag){
|
---|
4325 | fout.printtab(" ", this.field);
|
---|
4326 | fout.printtab("estimate", this.field);
|
---|
4327 | fout.printtab("the error", this.field);
|
---|
4328 | fout.printtab("of", this.field);
|
---|
4329 | fout.printtab("t", this.field);
|
---|
4330 | fout.println("P > |t|");
|
---|
4331 | }
|
---|
4332 | else{
|
---|
4333 | fout.printtab(" ", this.field);
|
---|
4334 | fout.println("estimate");
|
---|
4335 | }
|
---|
4336 |
|
---|
4337 | if(this.invertFlag){
|
---|
4338 | fout.printtab(" ", this.field);
|
---|
4339 | fout.printtab(" ", this.field);
|
---|
4340 | fout.printtab(" ", this.field);
|
---|
4341 | fout.println("variation (%)");
|
---|
4342 | }
|
---|
4343 | else{
|
---|
4344 | fout.println(" ");
|
---|
4345 | }
|
---|
4346 |
|
---|
4347 | for(int i=0; i<this.nGaussians; i++){
|
---|
4348 | if(this.invertFlag){
|
---|
4349 | fout.printtab("f[" + i + "]", this.field);
|
---|
4350 | fout.printtab(Fmath.truncate(this.multGaussFract[i],this.prec), this.field);
|
---|
4351 | fout.printtab(Fmath.truncate(this.multGaussFractErrors[i],this.prec), this.field);
|
---|
4352 | fout.printtab(Fmath.truncate(this.multGaussCoeffVar[i],this.prec), this.field);
|
---|
4353 | fout.printtab(Fmath.truncate(this.multGaussTvalue[i],this.prec), this.field);
|
---|
4354 | fout.println(Fmath.truncate(this.multGaussPvalue[i],this.prec));
|
---|
4355 | }
|
---|
4356 | else{
|
---|
4357 | fout.printtab("f[" + i + "]", this.field);
|
---|
4358 | fout.println(Fmath.truncate(this.multGaussFract[i],this.prec));
|
---|
4359 | }
|
---|
4360 | }
|
---|
4361 | }
|
---|
4362 | if(this.invertFlag){
|
---|
4363 | fout.printtab("yscale", this.field);
|
---|
4364 | fout.printtab(Fmath.truncate(this.multGaussScale,this.prec), this.field);
|
---|
4365 | fout.printtab(Fmath.truncate(this.multGaussScaleError,this.prec), this.field);
|
---|
4366 | fout.printtab(Fmath.truncate(this.multGaussScaleCoeffVar,this.prec), this.field);
|
---|
4367 | fout.printtab(Fmath.truncate(this.multGaussScaleTvalue,this.prec), this.field);
|
---|
4368 | fout.println(Fmath.truncate(this.multGaussScalePvalue,this.prec));
|
---|
4369 | }
|
---|
4370 | else{
|
---|
4371 | fout.printtab("yscale", this.field);
|
---|
4372 | fout.println(Fmath.truncate(this.multGaussScale,this.prec));
|
---|
4373 | }
|
---|
4374 | fout.println();
|
---|
4375 |
|
---|
4376 | int kk=0;
|
---|
4377 | for(int j=0; j<nYarrays; j++){
|
---|
4378 | if(this.multipleY)fout.println("Y array " + j);
|
---|
4379 |
|
---|
4380 | for(int i=0; i<this.nXarrays; i++){
|
---|
4381 | fout.printtab("x"+String.valueOf(i), this.field);
|
---|
4382 | }
|
---|
4383 |
|
---|
4384 | fout.printtab("y(expl)", this.field);
|
---|
4385 | fout.printtab("y(calc)", this.field);
|
---|
4386 | fout.printtab("weight", this.field);
|
---|
4387 | fout.printtab("residual", this.field);
|
---|
4388 | fout.println("residual");
|
---|
4389 |
|
---|
4390 | for(int i=0; i<this.nXarrays; i++){
|
---|
4391 | fout.printtab(" ", this.field);
|
---|
4392 | }
|
---|
4393 | fout.printtab(" ", this.field);
|
---|
4394 | fout.printtab(" ", this.field);
|
---|
4395 | fout.printtab(" ", this.field);
|
---|
4396 | fout.printtab("(unweighted)", this.field);
|
---|
4397 | fout.println("(weighted)");
|
---|
4398 | for(int i=0; i<this.nData0; i++){
|
---|
4399 | for(int jj=0; jj<this.nXarrays; jj++){
|
---|
4400 | fout.printtab(Fmath.truncate(this.xData[jj][kk],this.prec), this.field);
|
---|
4401 | }
|
---|
4402 | fout.printtab(Fmath.truncate(this.yData[kk],this.prec), this.field);
|
---|
4403 | fout.printtab(Fmath.truncate(this.yCalc[kk],this.prec), this.field);
|
---|
4404 | fout.printtab(Fmath.truncate(this.weight[kk],this.prec), this.field);
|
---|
4405 | fout.printtab(Fmath.truncate(this.residual[kk],this.prec), this.field);
|
---|
4406 | fout.println(Fmath.truncate(this.residualW[kk],this.prec));
|
---|
4407 | kk++;
|
---|
4408 | }
|
---|
4409 | fout.println();
|
---|
4410 | }
|
---|
4411 |
|
---|
4412 | fout.printtab("Sum of squares of the unweighted residuals");
|
---|
4413 | fout.println(Fmath.truncate(this.sumOfSquaresError,this.prec));
|
---|
4414 | if(this.trueFreq){
|
---|
4415 | fout.printtab("Chi Square (Poissonian bins)");
|
---|
4416 | fout.println(Fmath.truncate(this.chiSquare,this.prec));
|
---|
4417 | fout.printtab("Reduced Chi Square (Poissonian bins)");
|
---|
4418 | fout.println(Fmath.truncate(this.reducedChiSquare,this.prec));
|
---|
4419 | fout.printtab("Chi Square (Poissonian bins) Probability");
|
---|
4420 | fout.println(Fmath.truncate(1.0D-Stat.chiSquareProb(this.reducedChiSquare,this.degreesOfFreedom),this.prec));
|
---|
4421 | }
|
---|
4422 | else{
|
---|
4423 | if(weightOpt){
|
---|
4424 | fout.printtab("Chi Square");
|
---|
4425 | fout.println(Fmath.truncate(this.chiSquare,this.prec));
|
---|
4426 | fout.printtab("Reduced Chi Square");
|
---|
4427 | fout.println(Fmath.truncate(this.reducedChiSquare,this.prec));
|
---|
4428 | }
|
---|
4429 | }
|
---|
4430 |
|
---|
4431 | fout.println(" ");
|
---|
4432 |
|
---|
4433 | if(this.nXarrays==1 && this.nYarrays==1){
|
---|
4434 | fout.println("Correlation: x - y data");
|
---|
4435 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient (R)");
|
---|
4436 | fout.println(Fmath.truncate(this.xyR,this.prec));
|
---|
4437 | if(this.xyR<=1.0D){
|
---|
4438 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient Probability");
|
---|
4439 | fout.println(Fmath.truncate(Stat.linearCorrCoeffProb(this.xyR, this.nData-2),this.prec));
|
---|
4440 | }
|
---|
4441 | }
|
---|
4442 |
|
---|
4443 | fout.println(" ");
|
---|
4444 | fout.println("Correlation: y(experimental) - y(calculated)");
|
---|
4445 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient");
|
---|
4446 | fout.println(Fmath.truncate(this.yyR, this.prec));
|
---|
4447 | fout.printtab(this.weightWord[this.weightFlag] + "Linear Correlation Coefficient Probability");
|
---|
4448 | fout.println(Fmath.truncate(Stat.linearCorrCoeffProb(this.yyR, this.nData-2),this.prec));
|
---|
4449 |
|
---|
4450 | fout.println(" ");
|
---|
4451 | fout.printtab("Degrees of freedom");
|
---|
4452 | fout.println(this.degreesOfFreedom);
|
---|
4453 | fout.printtab("Number of data points");
|
---|
4454 | fout.println(this.nData);
|
---|
4455 | fout.printtab("Number of estimated paramaters");
|
---|
4456 | fout.println(this.nTerms);
|
---|
4457 |
|
---|
4458 | fout.println();
|
---|
4459 |
|
---|
4460 | if(this.posVarFlag && this.invertFlag && this.chiSquare!=0.0D){
|
---|
4461 | fout.println("Parameter - parameter correlation coefficients");
|
---|
4462 | fout.printtab(" ", this.field);
|
---|
4463 | for(int i=0; i<this.nTerms;i++){
|
---|
4464 | fout.printtab(paraName[i], this.field);
|
---|
4465 | }
|
---|
4466 | fout.println();
|
---|
4467 |
|
---|
4468 | for(int j=0; j<this.nTerms;j++){
|
---|
4469 | fout.printtab(paraName[j], this.field);
|
---|
4470 | for(int i=0; i<this.nTerms;i++){
|
---|
4471 | fout.printtab(Fmath.truncate(this.corrCoeff[i][j], this.prec), this.field);
|
---|
4472 | }
|
---|
4473 | fout.println();
|
---|
4474 | }
|
---|
4475 | fout.println();
|
---|
4476 | }
|
---|
4477 |
|
---|
4478 |
|
---|
4479 | fout.println();
|
---|
4480 | fout.println("Coefficient of determination, R = " + Fmath.truncate(this.multR, this.prec));
|
---|
4481 | fout.println("Adjusted Coefficient of determination, R' = " + Fmath.truncate(this.adjustedR, this.prec));
|
---|
4482 | fout.println("Coefficient of determination, F-ratio = " + Fmath.truncate(this.multipleF, this.prec));
|
---|
4483 | fout.println("Coefficient of determination, F-ratio probability = " + Fmath.truncate(this.multipleFprob, this.prec));
|
---|
4484 | fout.println("Total (weighted) sum of squares = " + Fmath.truncate(this.sumOfSquaresTotal, this.prec));
|
---|
4485 | fout.println("Regression (weighted) sum of squares = " + Fmath.truncate(this.sumOfSquaresRegrn, this.prec));
|
---|
4486 | fout.println("Error (weighted) sum of squares = " + Fmath.truncate(this.chiSquare, this.prec));
|
---|
4487 |
|
---|
4488 | fout.println();
|
---|
4489 |
|
---|
4490 | fout.println();
|
---|
4491 | fout.printtab("Number of iterations taken");
|
---|
4492 | fout.println(this.nIter);
|
---|
4493 | fout.printtab("Maximum number of iterations allowed");
|
---|
4494 | fout.println(this.nMax);
|
---|
4495 | fout.printtab("Number of restarts taken");
|
---|
4496 | fout.println(this.kRestart);
|
---|
4497 | fout.printtab("Maximum number of restarts allowed");
|
---|
4498 | fout.println(this.konvge);
|
---|
4499 | fout.printtab("Standard deviation of the simplex at the minimum");
|
---|
4500 | fout.println(Fmath.truncate(this.simplexSd, this.prec));
|
---|
4501 | fout.printtab("Convergence tolerance");
|
---|
4502 | fout.println(this.fTol);
|
---|
4503 | switch(minTest){
|
---|
4504 | case 0: fout.println("simplex sd < the tolerance times the mean of the absolute values of the y values");
|
---|
4505 | break;
|
---|
4506 | case 1: fout.println("simplex sd < the tolerance");
|
---|
4507 | break;
|
---|
4508 | case 2: fout.println("simplex sd < the tolerance times the square root(sum of squares/degrees of freedom");
|
---|
4509 | break;
|
---|
4510 | }
|
---|
4511 | fout.println("Step used in numerical differentiation to obtain Hessian matrix");
|
---|
4512 | fout.println("d(parameter) = parameter*"+this.delta);
|
---|
4513 |
|
---|
4514 | fout.println();
|
---|
4515 | fout.println("End of file");
|
---|
4516 | fout.close();
|
---|
4517 | }
|
---|
4518 |
|
---|
4519 | // plot calculated y against experimental y
|
---|
4520 | // title provided
|
---|
4521 | public void plotYY(String title){
|
---|
4522 | this.graphTitle = title;
|
---|
4523 | int ncurves = 2;
|
---|
4524 | int npoints = this.nData0;
|
---|
4525 | double[][] data = PlotGraph.data(ncurves, npoints);
|
---|
4526 |
|
---|
4527 | int kk = 0;
|
---|
4528 | for(int jj=0; jj<this.nYarrays; jj++){
|
---|
4529 |
|
---|
4530 | // fill first curve with experimental versus best fit values
|
---|
4531 | for(int i=0; i<nData0; i++){
|
---|
4532 | data[0][i]=this.yData[kk];
|
---|
4533 | data[1][i]=this.yCalc[kk];
|
---|
4534 | kk++;
|
---|
4535 | }
|
---|
4536 |
|
---|
4537 | // Create a title
|
---|
4538 | String title0 = this.setGandPtitle(this.graphTitle);
|
---|
4539 | if(this.multipleY)title0 = title0 + "y array " + jj;
|
---|
4540 | String title1 = "Calculated versus experimental y values";
|
---|
4541 |
|
---|
4542 | // Calculate best fit straight line between experimental and best fit values
|
---|
4543 | Regression yyRegr = new Regression(this.yData, this.yCalc, this.weight);
|
---|
4544 | yyRegr.linear();
|
---|
4545 | double[] coef = yyRegr.getCoeff();
|
---|
4546 | data[2][0]=Fmath.minimum(this.yData);
|
---|
4547 | data[3][0]=coef[0]+coef[1]*data[2][0];
|
---|
4548 | data[2][1]=Fmath.maximum(this.yData);
|
---|
4549 | data[3][1]=coef[0]+coef[1]*data[2][1];
|
---|
4550 |
|
---|
4551 | PlotGraph pg = new PlotGraph(data);
|
---|
4552 | if(plotWindowCloseChoice){
|
---|
4553 | pg.setCloseChoice(2);
|
---|
4554 | }
|
---|
4555 | else{
|
---|
4556 | pg.setCloseChoice(1);
|
---|
4557 | }
|
---|
4558 |
|
---|
4559 | pg.setGraphTitle(title0);
|
---|
4560 | pg.setGraphTitle2(title1);
|
---|
4561 | pg.setXaxisLegend("Experimental y value");
|
---|
4562 | pg.setYaxisLegend("Calculated y value");
|
---|
4563 | int[] popt = {1, 0};
|
---|
4564 | pg.setPoint(popt);
|
---|
4565 | int[] lopt = {0, 3};
|
---|
4566 | pg.setLine(lopt);
|
---|
4567 |
|
---|
4568 | pg.plot();
|
---|
4569 | }
|
---|
4570 | }
|
---|
4571 |
|
---|
4572 | //Creates a title
|
---|
4573 | protected String setGandPtitle(String title){
|
---|
4574 | String title1 = "";
|
---|
4575 | switch(this.lastMethod){
|
---|
4576 | case 0: title1 = "Linear regression (with intercept): "+title;
|
---|
4577 | break;
|
---|
4578 | case 1: title1 = "Linear(polynomial with degree = " + (nTerms-1) + ") regression: "+title;
|
---|
4579 | break;
|
---|
4580 | case 2: title1 = "General linear regression: "+title;
|
---|
4581 | break;
|
---|
4582 | case 3: title1 = "Non-linear (simplex) regression: "+title;
|
---|
4583 | break;
|
---|
4584 | case 4: title1 = "Fit to a Gaussian distribution: "+title;
|
---|
4585 | break;
|
---|
4586 | case 5: title1 = "Fit to a Lorentzian distribution: "+title;
|
---|
4587 | break;
|
---|
4588 | case 6:title1 = "Fit to a Poisson distribution: "+title;
|
---|
4589 | break;
|
---|
4590 | case 7: title1 = "Fit to a Two Parameter Minimum Order Statistic Gumbel distribution: "+title;
|
---|
4591 | break;
|
---|
4592 | case 8: title1 = "Fit to a two Parameter Maximum Order Statistic Gumbel distribution: "+title;
|
---|
4593 | break;
|
---|
4594 | case 9: title1 = "Fit to a One Parameter Minimum Order Statistic Gumbel distribution: "+title;
|
---|
4595 | break;
|
---|
4596 | case 10: title1 = "Fit to a One Parameter Maximum Order Statistic Gumbel distribution: "+title;
|
---|
4597 | break;
|
---|
4598 | case 11: title1 = "Fit to a Standard Minimum Order Statistic Gumbel distribution: "+title;
|
---|
4599 | break;
|
---|
4600 | case 12: title1 = "Fit to a Standard Maximum Order Statistic Gumbel distribution: "+title;
|
---|
4601 | break;
|
---|
4602 | case 13:title1 = "Fit to a Three Parameter Frechet distribution: "+title;
|
---|
4603 | break;
|
---|
4604 | case 14:title1 = "Fit to a Two Parameter Frechet distribution: "+title;
|
---|
4605 | break;
|
---|
4606 | case 15:title1 = "Fit to a Standard Frechet distribution: "+title;
|
---|
4607 | break;
|
---|
4608 | case 16:title1 = "Fit to a Three Parameter Weibull distribution: "+title;
|
---|
4609 | break;
|
---|
4610 | case 17:title1 = "Fit to a Two Parameter Weibull distribution: "+title;
|
---|
4611 | break;
|
---|
4612 | case 18:title1 = "Fit to a Standard Weibull distribution: "+title;
|
---|
4613 | break;
|
---|
4614 | case 19:title1 = "Fit to a Two Parameter Exponential distribution: "+title;
|
---|
4615 | break;
|
---|
4616 | case 20:title1 = "Fit to a One Parameter Exponential distribution: "+title;
|
---|
4617 | break;
|
---|
4618 | case 21:title1 = "Fit to a Standard exponential distribution: "+title;
|
---|
4619 | break;
|
---|
4620 | case 22:title1 = "Fit to a Rayleigh distribution: "+title;
|
---|
4621 | break;
|
---|
4622 | case 23:title1 = "Fit to a Two Parameter Pareto distribution: "+title;
|
---|
4623 | break;
|
---|
4624 | case 24:title1 = "Fit to a One Parameter Pareto distribution: "+title;
|
---|
4625 | break;
|
---|
4626 | case 25:title1 = "Fit to a Sigmoid Threshold Function: "+title;
|
---|
4627 | break;
|
---|
4628 | case 26:title1 = "Fit to a Rectangular Hyperbola: "+title;
|
---|
4629 | break;
|
---|
4630 | case 27:title1 = "Fit to a Scaled Heaviside Step Function: "+title;
|
---|
4631 | break;
|
---|
4632 | case 28:title1 = "Fit to a Hill/Sips Sigmoid: "+title;
|
---|
4633 | break;
|
---|
4634 | case 29:title1 = "Fit to a Shifted Pareto distribution: "+title;
|
---|
4635 | break;
|
---|
4636 | case 30:title1 = "Fit to a Logistic distribution: "+title;
|
---|
4637 | break;
|
---|
4638 | case 31:title1 = "Fit to a Beta distribution - interval [0, 1]: "+title;
|
---|
4639 | break;
|
---|
4640 | case 32:title1 = "Fit to a Beta distribution - interval [min, max]: "+title;
|
---|
4641 | break;
|
---|
4642 | case 33:title1 = "Fit to a Three Parameter Gamma distribution]: "+title;
|
---|
4643 | break;
|
---|
4644 | case 34:title1 = "Fit to a Standard Gamma distribution]: "+title;
|
---|
4645 | break;
|
---|
4646 | case 35:title1 = "Fit to an Erlang distribution]: "+title;
|
---|
4647 | break;
|
---|
4648 | case 36:title1 = "Fit to an two parameter log-normal distribution]: "+title;
|
---|
4649 | break;
|
---|
4650 | case 37:title1 = "Fit to an three parameter log-normal distribution]: "+title;
|
---|
4651 | break;
|
---|
4652 | case 38: title1 = "Fit to a Gaussian distribution with fixed parameters: "+title;
|
---|
4653 | break;
|
---|
4654 | case 39: title1 = "Fit to a EC50 dose response curve: "+title;
|
---|
4655 | break;
|
---|
4656 | case 40: title1 = "Fit to a LogEC50 dose response curve: "+title;
|
---|
4657 | break;
|
---|
4658 | case 41: title1 = "Fit to a EC50 dose response curve - bottom constrained [>= 0]: "+title;
|
---|
4659 | break;
|
---|
4660 | case 42: title1 = "Fit to a LogEC50 dose response curve - bottom constrained [>= 0]: "+title;
|
---|
4661 | break;
|
---|
4662 | case 43: title1 = "Fit to an exponential yscale.exp(A.x): "+title;
|
---|
4663 | break;
|
---|
4664 | case 44: title1 = "Fit to multiple exponentials sum[Ai.exp(Bi.x)]: "+title;
|
---|
4665 | break;
|
---|
4666 | case 45: title1 = "Fit to an exponential A.(1 - exp(B.x): "+title;
|
---|
4667 | break;
|
---|
4668 | case 46: title1 = "Fit to a constant a: "+title;
|
---|
4669 | break;
|
---|
4670 | case 47: title1 = "Linear regression (with fixed intercept): "+title;
|
---|
4671 | break;
|
---|
4672 | case 48: title1 = "Linear(polynomial with degree = " + (nTerms-1) + " and fixed intercept) regression: "+title;
|
---|
4673 | break;
|
---|
4674 | case 49: title1 = "Fitting multiple Gaussian distributions";
|
---|
4675 | break;
|
---|
4676 | case 50: title1 = "Fitting to a non-integer polynomial";
|
---|
4677 | break;
|
---|
4678 |
|
---|
4679 | default: title1 = " "+title;
|
---|
4680 | }
|
---|
4681 | return title1;
|
---|
4682 | }
|
---|
4683 |
|
---|
4684 | // plot calculated y against experimental y
|
---|
4685 | // no title provided
|
---|
4686 | public void plotYY(){
|
---|
4687 | plotYY(this.graphTitle);
|
---|
4688 | }
|
---|
4689 |
|
---|
4690 | // plot experimental x against experimental y and against calculated y
|
---|
4691 | // linear regression data
|
---|
4692 | // title provided
|
---|
4693 | protected int plotXY(String title){
|
---|
4694 | this.graphTitle = title;
|
---|
4695 | int flag=0;
|
---|
4696 | if(!this.linNonLin && this.nTerms>0){
|
---|
4697 | System.out.println("You attempted to use Regression.plotXY() for a non-linear regression without providing the function reference (pointer) in the plotXY argument list");
|
---|
4698 | System.out.println("No plot attempted");
|
---|
4699 | flag=-1;
|
---|
4700 | return flag;
|
---|
4701 | }
|
---|
4702 | flag = this.plotXYlinear(title);
|
---|
4703 | return flag;
|
---|
4704 | }
|
---|
4705 |
|
---|
4706 | // plot experimental x against experimental y and against calculated y
|
---|
4707 | // Linear regression data
|
---|
4708 | // no title provided
|
---|
4709 | public int plotXY(){
|
---|
4710 | int flag = plotXY(this.graphTitle);
|
---|
4711 | return flag;
|
---|
4712 | }
|
---|
4713 |
|
---|
4714 | // plot experimental x against experimental y and against calculated y
|
---|
4715 | // non-linear regression data
|
---|
4716 | // title provided
|
---|
4717 | // matching simplex
|
---|
4718 | protected int plotXY(RegressionFunction g, String title){
|
---|
4719 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y array\nplotXY2 should have been called");
|
---|
4720 | Object regFun = (Object)g;
|
---|
4721 | int flag = this.plotXYnonlinear(regFun, title);
|
---|
4722 | return flag;
|
---|
4723 | }
|
---|
4724 |
|
---|
4725 | // plot experimental x against experimental y and against calculated y
|
---|
4726 | // non-linear regression data
|
---|
4727 | // title provided
|
---|
4728 | // matching simplex2
|
---|
4729 | protected int plotXY2(RegressionFunction2 g, String title){
|
---|
4730 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nsimplex should have been called");
|
---|
4731 | this.graphTitle = title;
|
---|
4732 | Object regFun = (Object)g;
|
---|
4733 | int flag = this.plotXYnonlinear(regFun, title);
|
---|
4734 | return flag;
|
---|
4735 | }
|
---|
4736 |
|
---|
4737 | // plot experimental x against experimental y and against calculated y
|
---|
4738 | // non-linear regression data
|
---|
4739 | // no title provided
|
---|
4740 | // matches simplex
|
---|
4741 | protected int plotXY(RegressionFunction g){
|
---|
4742 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y array\nplotXY2 should have been called");
|
---|
4743 | Object regFun = (Object)g;
|
---|
4744 | int flag = this.plotXYnonlinear(regFun, this.graphTitle);
|
---|
4745 | return flag;
|
---|
4746 | }
|
---|
4747 |
|
---|
4748 | // plot experimental x against experimental y and against calculated y
|
---|
4749 | // non-linear regression data
|
---|
4750 | // no title provided
|
---|
4751 | // matches simplex2
|
---|
4752 | protected int plotXY2(RegressionFunction2 g){
|
---|
4753 | if(!this.multipleY)throw new IllegalArgumentException("This method cannot handle singly dimensioned y array\nplotXY should have been called");
|
---|
4754 | Object regFun = (Object)g;
|
---|
4755 | int flag = this.plotXYnonlinear(regFun, this.graphTitle);
|
---|
4756 | return flag;
|
---|
4757 | }
|
---|
4758 |
|
---|
4759 | // Add legends option
|
---|
4760 | public void addLegends(){
|
---|
4761 | int ans = JOptionPane.showConfirmDialog(null, "Do you wish to add your own legends to the x and y axes", "Axis Legends", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE);
|
---|
4762 | if(ans==0){
|
---|
4763 | this.xLegend = JOptionPane.showInputDialog("Type the legend for the abscissae (x-axis) [first data set]" );
|
---|
4764 | this.yLegend = JOptionPane.showInputDialog("Type the legend for the ordinates (y-axis) [second data set]" );
|
---|
4765 | this.legendCheck = true;
|
---|
4766 | }
|
---|
4767 | }
|
---|
4768 |
|
---|
    // Protected method for plotting experimental x against experimental y and against calculated y
    // Linear regression; graph title provided by the caller.
    // Returns 0 if the data were plotted, -1 if the last fitting method cannot
    // be drawn in two dimensions, -2 if a multiple-regression plot was attempted.
    protected int plotXYlinear(String title){
        this.graphTitle = title;
        int flag=0; //Returned as 0 if plot data can be plotted, -1 if not, -2 if tried multiple regression plot
        // More than one independent variable: a 2D x-y plot is impossible
        if(this.nXarrays>1){
            System.out.println("You attempted to use Regression.plotXY() for a multiple regression");
            System.out.println("No plot attempted");
            flag=-2;
            return flag;
        }

        // Curve 0: experimental points; curve 1: fitted/theoretical line
        int ncurves = 2;
        int npoints = 200;      // default resolution of the drawn line
        if(npoints<this.nData0)npoints=this.nData0;
        // Methods 11, 12 and 21 plot the stored yCalc values directly,
        // so the line must have exactly one point per datum
        if(this.lastMethod==11 || this.lastMethod==12 || this.lastMethod==21)npoints=this.nData0;
        double[][] data = PlotGraph.data(ncurves, npoints);
        double xmin =Fmath.minimum(xData[0]);
        double xmax =Fmath.maximum(xData[0]);
        double inc = (xmax - xmin)/(double)(npoints - 1);   // x step of the drawn line
        String title1 = " ";
        String title2 = " ";

        // Experimental points
        for(int i=0; i<nData0; i++){
            data[0][i] = this.xData[0][i];
            data[1][i] = this.yData[i];
        }

        // Equally spaced abscissae for the drawn line over [xmin, xmax]
        data[2][0]=xmin;
        for(int i=1; i<npoints; i++)data[2][i] = data[2][i-1] + inc;
        if(this.nTerms==0){
            // No parameters were estimated: plot the stored theoretical curve
            switch(this.lastMethod){
                case 11: title1 = "No regression: Minimum Order Statistic Standard Gumbel (y = exp(x)exp(-exp(x))): "+this.graphTitle;
                        title2 = " points - experimental values; line - theoretical curve; no parameters to be estimated";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = this.yCalc[i];
                        break;
                case 12: title1 = "No regression: Maximum Order Statistic Standard Gumbel (y = exp(-x)exp(-exp(-x))): "+this.graphTitle;
                        title2 = " points - experimental values; line - theoretical curve; no parameters to be estimated";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = this.yCalc[i];
                        break;
                case 21: title1 = "No regression: Standard Exponential (y = exp(-x)): "+this.graphTitle;
                        title2 = " points - experimental values; line - theoretical curve; no parameters to be estimated";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = this.yCalc[i];
                        break;
            }

        }
        else{
            // Parameters were fitted: evaluate the fitted model on the line abscissae
            switch(this.lastMethod){
                case 0: title1 = "Linear regression (y = a + b.x): "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = best[0] + best[1]*data[2][i];
                        break;
                case 1: title1 = "Linear (polynomial with degree = " + (nTerms-1) + ") regression: "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        // Polynomial: best[0] + best[1]*x + ... + best[nTerms-1]*x^(nTerms-1)
                        for(int i=0; i<npoints; i++){
                            double sum=best[0];
                            for(int j=1; j<this.nTerms; j++)sum+=best[j]*Math.pow(data[2][i],j);
                            data[3][i] = sum;
                        }
                        break;
                case 2: title1 = "Linear regression (y = a.x): "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(this.nXarrays==1){
                            if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                            for(int i=0; i<npoints; i++)data[3][i] = best[0]*data[2][i];
                        }
                        else{
                            // y = a.x with several x arrays cannot be shown in 2D
                            System.out.println("Regression.plotXY(linear): lastMethod, "+lastMethod+",cannot be plotted in two dimensions");
                            System.out.println("No plot attempted");
                            flag=-1;
                        }
                        break;
                case 11: title1 = "Linear regression: Minimum Order Statistic Standard Gumbel (y = a.z where z = exp(x)exp(-exp(x))): "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = best[0]*Math.exp(data[2][i])*Math.exp(-Math.exp(data[2][i]));
                        break;
                case 12: title1 = "Linear regression: Maximum Order Statistic Standard Gumbel (y = a.z where z=exp(-x)exp(-exp(-x))): "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = best[0]*Math.exp(-data[2][i])*Math.exp(-Math.exp(-data[2][i]));
                        break;
                case 46: title1 = "Linear regression: Fit to a constant (y = a): "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = best[0];
                        break;
                case 47: title1 = "Linear regression (y = fixed intercept + b.x): "+this.graphTitle;
                        title2 = " points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        for(int i=0; i<npoints; i++)data[3][i] = this.fixedInterceptL + best[0]*data[2][i];
                        break;
                case 48: title1 = "Linear (polynomial with degree = " + nTerms + ") regression: "+this.graphTitle;
                        title2 = "Fixed intercept; points - experimental values; line - best fit curve";
                        if(weightOpt)title2 = title2 +"; error bars - weighting factors";
                        // Fixed-intercept polynomial: intercept + best[0]*x + ... + best[nTerms-1]*x^nTerms
                        for(int i=0; i<npoints; i++){
                            double sum=this.fixedInterceptP;
                            for(int j=0; j<this.nTerms; j++)sum+=best[j]*Math.pow(data[2][i],j+1);
                            data[3][i] = sum;
                        }
                        break;

                default: System.out.println("Regression.plotXY(linear): lastMethod, "+lastMethod+", either not recognised or cannot be plotted in two dimensions");
                        System.out.println("No plot attempted");
                        flag=-1;
                        return flag;
            }
        }

        PlotGraph pg = new PlotGraph(data);
        // Window close behaviour code as interpreted by PlotGraph
        if(plotWindowCloseChoice){
            pg.setCloseChoice(2);
        }
        else{
            pg.setCloseChoice(1);
        }

        pg.setGraphTitle(title1);
        pg.setGraphTitle2(title2);
        pg.setXaxisLegend(this.xLegend);
        pg.setYaxisLegend(this.yLegend);
        int[] popt = {1,0};     // point option per curve: points for data, none for the line
        pg.setPoint(popt);
        int[] lopt = {0,3};     // line option per curve: no line for data, line style 3 for the fit
        pg.setLine(lopt);
        if(weightOpt)pg.setErrorBars(0,this.weight);
        pg.plot();

        return flag;
    }
|
---|
4906 |
|
---|
    // Method for plotting experimental x against experimental y and against calculated y
    // Non-linear regression; graph title provided by the caller.
    // regFun must be the RegressionFunction (single y array) or
    // RegressionFunction2 (multiple y arrays) used in the fit.
    // Returns 0 if plotted, -1 if the last method cannot be plotted in
    // two dimensions, -2 if plotYY() was substituted (nXarrays > 1).
    public int plotXYnonlinear(Object regFun, String title){
        this.graphTitle = title;
        // Cast regFun according to the y-array dimensionality of the fit
        RegressionFunction g1 = null;
        RegressionFunction2 g2 = null;
        if(this.multipleY){
            g2 = (RegressionFunction2)regFun;
        }
        else{
            g1 = (RegressionFunction)regFun;
        }

        int flag=0; //Returned as 0 if plot data can be plotted, -1 if not

        // lastMethod < 3 are the linear methods - not handled here
        if(this.lastMethod<3){
            System.out.println("Regression.plotXY(non-linear): lastMethod, "+lastMethod+", either not recognised or cannot be plotted in two dimensions");
            System.out.println("No plot attempted");
            flag=-1;
            return flag;
        }

        if(this.nXarrays>1){
            // More than one independent variable: fall back to a y-y plot
            System.out.println("Multiple Linear Regression with more than one independent variable cannot be plotted in two dimensions");
            System.out.println("plotYY() called instead of plotXY()");
            this.plotYY(title);
            flag=-2;
        }
        else{
            if(this.multipleY){
                // One plot window per y data array
                int ncurves = 2;
                int npoints = 200;
                if(npoints<this.nData0)npoints=this.nData0;
                String title1, title2;
                int kk=0;       // running index into the concatenated data/weight arrays
                double[] wWeight = new double[this.nData0];
                for(int jj=0; jj<this.nYarrays; jj++){
                    double[][] data = PlotGraph.data(ncurves, npoints);
                    // Experimental points and weights of the jj-th y array
                    for(int i=0; i<this.nData0; i++){
                        data[0][i] = this.xData[0][kk];
                        data[1][i] = this.yData[kk];
                        wWeight[i] = this.weight[kk];
                        kk++;
                    }
                    double xmin =Fmath.minimum(xData[0]);
                    double xmax =Fmath.maximum(xData[0]);
                    double inc = (xmax - xmin)/(double)(npoints - 1);
                    data[2][0]=xmin;
                    for(int i=1; i<npoints; i++)data[2][i] = data[2][i-1] + inc;
                    double[] xd = new double[this.nXarrays];
                    // Best-fit curve on equally spaced abscissae; the offset
                    // jj*nData0 selects the jj-th y array inside the function
                    for(int i=0; i<npoints; i++){
                        xd[0] = data[2][i];
                        data[3][i] = g2.function(best, xd, jj*this.nData0);
                    }

                    // Create a title
                    title1 = this.setGandPtitle(title);
                    title2 = " points - experimental values; line - best fit curve; y data array " + jj;
                    if(weightOpt)title2 = title2 +"; error bars - weighting factors";

                    PlotGraph pg = new PlotGraph(data);
                    // Window close behaviour code as interpreted by PlotGraph
                    if(plotWindowCloseChoice){
                        pg.setCloseChoice(2);
                    }
                    else{
                        pg.setCloseChoice(1);
                    }

                    pg.setGraphTitle(title1);
                    pg.setGraphTitle2(title2);
                    pg.setXaxisLegend(this.xLegend);
                    pg.setYaxisLegend(this.yLegend);
                    int[] popt = {1,0};     // points for data, none for the fitted line
                    pg.setPoint(popt);
                    int[] lopt = {0,3};     // no line for data, line style 3 for the fit
                    pg.setLine(lopt);
                    if(weightOpt)pg.setErrorBars(0,wWeight);

                    pg.plot();
                }
            }
            else{
                // Single y array
                int ncurves = 2;
                int npoints = 200;
                if(npoints<this.nData0)npoints=this.nData0;
                // Method 6 evaluates the fit only at the experimental x values
                if(this.lastMethod==6)npoints=this.nData0;
                String title1, title2;
                double[][] data = PlotGraph.data(ncurves, npoints);
                for(int i=0; i<this.nData0; i++){
                    data[0][i] = this.xData[0][i];
                    data[1][i] = this.yData[i];
                }
                if(this.lastMethod==6){
                    // Fitted curve sampled at the experimental abscissae
                    double[] xd = new double[this.nXarrays];
                    for(int i=0; i<npoints; i++){
                        data[2][i]=data[0][i];
                        xd[0] = data[2][i];
                        data[3][i] = g1.function(best, xd);
                    }
                }
                else{
                    // Fitted curve sampled on equally spaced abscissae
                    double xmin =Fmath.minimum(xData[0]);
                    double xmax =Fmath.maximum(xData[0]);
                    double inc = (xmax - xmin)/(double)(npoints - 1);
                    data[2][0]=xmin;
                    for(int i=1; i<npoints; i++)data[2][i] = data[2][i-1] + inc;
                    double[] xd = new double[this.nXarrays];
                    for(int i=0; i<npoints; i++){
                        xd[0] = data[2][i];
                        data[3][i] = g1.function(best, xd);
                    }
                }

                // Create a title
                title1 = this.setGandPtitle(title);
                title2 = " points - experimental values; line - best fit curve";
                if(weightOpt)title2 = title2 +"; error bars - weighting factors";

                PlotGraph pg = new PlotGraph(data);
                if(plotWindowCloseChoice){
                    pg.setCloseChoice(2);
                }
                else{
                    pg.setCloseChoice(1);
                }

                pg.setGraphTitle(title1);
                pg.setGraphTitle2(title2);
                pg.setXaxisLegend(this.xLegend);
                pg.setYaxisLegend(this.yLegend);
                int[] popt = {1,0};
                pg.setPoint(popt);
                int[] lopt = {0,3};
                pg.setLine(lopt);

                if(weightOpt)pg.setErrorBars(0,this.weight);

                pg.plot();
            }
        }
        return flag;
    }
|
---|
5050 |
|
---|
    // Method for plotting experimental x against experimental y and against calculated y
    // Non-linear regression with ALL parameters fixed: the curve is evaluated
    // at this.values rather than at the fitted best estimates.
    // NOTE(review): this is a near-duplicate of plotXYnonlinear differing only
    // in the parameter array passed to the function - a shared private helper
    // would remove the duplication; confirm before refactoring.
    // Returns 0 if plotted, -1 if the last method cannot be plotted in
    // two dimensions, -2 if plotYY() was substituted (nXarrays > 1).
    public int plotXYfixed(Object regFun, String title){
        this.graphTitle = title;
        // Cast regFun according to the y-array dimensionality of the fit
        RegressionFunction g1 = null;
        RegressionFunction2 g2 = null;
        if(this.multipleY){
            g2 = (RegressionFunction2)regFun;
        }
        else{
            g1 = (RegressionFunction)regFun;
        }

        int flag=0; //Returned as 0 if plot data can be plotted, -1 if not

        // lastMethod < 3 are the linear methods - not handled here
        if(this.lastMethod<3){
            System.out.println("Regression.plotXY(non-linear): lastMethod, "+lastMethod+", either not recognised or cannot be plotted in two dimensions");
            System.out.println("No plot attempted");
            flag=-1;
            return flag;
        }


        if(this.nXarrays>1){
            // More than one independent variable: fall back to a y-y plot
            System.out.println("Multiple Linear Regression with more than one independent variable cannot be plotted in two dimensions");
            System.out.println("plotYY() called instead of plotXY()");
            this.plotYY(title);
            flag=-2;
        }
        else{
            if(this.multipleY){
                // One plot window per y data array
                int ncurves = 2;
                int npoints = 200;
                if(npoints<this.nData0)npoints=this.nData0;
                String title1, title2;
                int kk=0;       // running index into the concatenated data/weight arrays
                double[] wWeight = new double[this.nData0];
                for(int jj=0; jj<this.nYarrays; jj++){
                    double[][] data = PlotGraph.data(ncurves, npoints);
                    // Experimental points and weights of the jj-th y array
                    for(int i=0; i<this.nData0; i++){
                        data[0][i] = this.xData[0][kk];
                        data[1][i] = this.yData[kk];
                        wWeight[i] = this.weight[kk];
                        kk++;
                    }
                    double xmin =Fmath.minimum(xData[0]);
                    double xmax =Fmath.maximum(xData[0]);
                    double inc = (xmax - xmin)/(double)(npoints - 1);
                    data[2][0]=xmin;
                    for(int i=1; i<npoints; i++)data[2][i] = data[2][i-1] + inc;
                    double[] xd = new double[this.nXarrays];
                    // Curve evaluated at the FIXED parameter values this.values
                    for(int i=0; i<npoints; i++){
                        xd[0] = data[2][i];
                        data[3][i] = g2.function(this.values, xd, jj*this.nData0);
                    }

                    // Create a title
                    title1 = this.setGandPtitle(title);
                    title2 = " points - experimental values; line - best fit curve; y data array " + jj;
                    if(weightOpt)title2 = title2 +"; error bars - weighting factors";

                    PlotGraph pg = new PlotGraph(data);
                    // Window close behaviour code as interpreted by PlotGraph
                    if(plotWindowCloseChoice){
                        pg.setCloseChoice(2);
                    }
                    else{
                        pg.setCloseChoice(1);
                    }

                    pg.setGraphTitle(title1);
                    pg.setGraphTitle2(title2);
                    pg.setXaxisLegend(this.xLegend);
                    pg.setYaxisLegend(this.yLegend);
                    int[] popt = {1,0};     // points for data, none for the curve
                    pg.setPoint(popt);
                    int[] lopt = {0,3};     // no line for data, line style 3 for the curve
                    pg.setLine(lopt);
                    if(weightOpt)pg.setErrorBars(0,wWeight);

                    pg.plot();
                }
            }
            else{
                // Single y array
                int ncurves = 2;
                int npoints = 200;
                if(npoints<this.nData0)npoints=this.nData0;
                // Method 6 evaluates the curve only at the experimental x values
                if(this.lastMethod==6)npoints=this.nData0;
                String title1, title2;
                double[][] data = PlotGraph.data(ncurves, npoints);
                for(int i=0; i<this.nData0; i++){
                    data[0][i] = this.xData[0][i];
                    data[1][i] = this.yData[i];
                }
                if(this.lastMethod==6){
                    // Curve sampled at the experimental abscissae
                    double[] xd = new double[this.nXarrays];
                    for(int i=0; i<npoints; i++){
                        data[2][i]=data[0][i];
                        xd[0] = data[2][i];
                        data[3][i] = g1.function(this.values, xd);
                    }
                }
                else{
                    // Curve sampled on equally spaced abscissae
                    double xmin =Fmath.minimum(xData[0]);
                    double xmax =Fmath.maximum(xData[0]);
                    double inc = (xmax - xmin)/(double)(npoints - 1);
                    data[2][0]=xmin;
                    for(int i=1; i<npoints; i++)data[2][i] = data[2][i-1] + inc;
                    double[] xd = new double[this.nXarrays];
                    for(int i=0; i<npoints; i++){
                        xd[0] = data[2][i];
                        data[3][i] = g1.function(this.values, xd);
                    }
                }

                // Create a title
                title1 = this.setGandPtitle(title);
                title2 = " points - experimental values; line - best fit curve";
                if(weightOpt)title2 = title2 +"; error bars - weighting factors";

                PlotGraph pg = new PlotGraph(data);
                if(plotWindowCloseChoice){
                    pg.setCloseChoice(2);
                }
                else{
                    pg.setCloseChoice(1);
                }


                pg.setGraphTitle(title1);
                pg.setGraphTitle2(title2);
                pg.setXaxisLegend(this.xLegend);
                pg.setYaxisLegend(this.yLegend);
                int[] popt = {1,0};
                pg.setPoint(popt);
                int[] lopt = {0,3};
                pg.setLine(lopt);

                if(weightOpt)pg.setErrorBars(0,this.weight);

                pg.plot();
            }
        }
        return flag;
    }
|
---|
5196 |
|
---|
5197 |
|
---|
    // Get the non-linear regression status
    // true if convergence was achieved
    // false if convergence not achieved before maximum number of iterations
    // current values then returned
    public boolean getNlrStatus(){
        return this.nlrStatus;
    }

    // Reset scaling factors (scaleOpt 0 and 1, see below for scaleOpt 2)
    // 0 = no scaling, 1 = initial estimates all scaled to unity
    public void setScale(int n){
        if(n<0 || n>1)throw new IllegalArgumentException("The argument must be 0 (no scaling) 1(initial estimates all scaled to unity) or the array of scaling factors");
        this.scaleOpt=n;
    }

    // Reset scaling factors (scaleOpt 2, see above for scaleOpt 0 and 1)
    // One caller-supplied scaling factor per parameter.
    // NOTE(review): the array is stored by reference, not copied - later
    // mutation by the caller will affect this object; confirm intended.
    public void setScale(double[] sc){
        this.scale=sc;
        this.scaleOpt=2;
    }

    // Get scaling factors
    // NOTE(review): returns the internal array by reference (other getters
    // in this class return Conv.copy copies); confirm whether intended.
    public double[] getScale(){
        return this.scale;
    }
|
---|
5222 |
|
---|
    // Reset the non-linear regression convergence test option
    // Only 0 and 1 are accepted; semantics of each code are defined where
    // minTest is used in the simplex convergence check.
    public void setMinTest(int n){
        if(n<0 || n>1)throw new IllegalArgumentException("minTest must be 0 or 1");
        this.minTest=n;
    }

    // Get the non-linear regression convergence test option
    public int getMinTest(){
        return this.minTest;
    }

    // Get the simplex sd at the minimum
    public double getSimplexSd(){
        return this.simplexSd;
    }
|
---|
5238 |
|
---|
    // Get the best estimates of the unknown parameters
    // Returns a copy - callers cannot mutate the stored estimates.
    public double[] getBestEstimates(){
        return Conv.copy(best);
    }

    // Get the best estimates of the unknown parameters
    // Synonym of getBestEstimates(), retained for backward compatibility.
    public double[] getCoeff(){
        return Conv.copy(best);
    }

    // Get the estimates of the standard deviations of the best estimates of the unknown parameters
    // Lower-case spelling retained for backward compatibility;
    // prefer getBestEstimatesStandardDeviations().
    public double[] getbestestimatesStandardDeviations(){
        return Conv.copy(bestSd);
    }

    // Get the estimates of the errors of the best estimates of the unknown parameters
    public double[] getBestEstimatesStandardDeviations(){
        return Conv.copy(bestSd);
    }

    // Get the estimates of the errors of the best estimates of the unknown parameters
    // Synonym of getBestEstimatesStandardDeviations().
    public double[] getCoeffSd(){
        return Conv.copy(bestSd);
    }

    // Get the estimates of the errors of the best estimates of the unknown parameters
    // Synonym of getBestEstimatesStandardDeviations().
    public double[] getBestEstimatesErrors(){
        return Conv.copy(bestSd);
    }

    // Get the unscaled initial estimates of the unknown parameters
    public double[] getInitialEstimates(){
        return Conv.copy(startH);
    }
|
---|
5273 |
|
---|
    // Get the scaled initial estimates of the unknown parameters
    public double[] getScaledInitialEstimates(){
        return Conv.copy(startSH);
    }

    // Get the unscaled initial step sizes
    public double[] getInitialSteps(){
        return Conv.copy(stepH);
    }

    // Get the scaled initial step sizes
    public double[] getScaledInitialSteps(){
        return Conv.copy(stepSH);
    }
|
---|
5288 |
|
---|
    // Get the coefficients of variation of the best estimates of the unknown parameters
    // Each element is 100*sd/best, i.e. the percentage coefficient of variation.
    // NOTE(review): divides by best[i] - yields Infinity/NaN if a best
    // estimate is zero; confirm whether callers guard against this.
    public double[] getCoeffVar(){
        double[] coeffVar = new double[this.nTerms];

        for(int i=0; i<this.nTerms; i++){
            coeffVar[i]=bestSd[i]*100.0D/best[i];
        }
        return coeffVar;
    }
|
---|
5298 |
|
---|
    // Get the pseudo-estimates of the errors of the best estimates of the unknown parameters
    public double[] getPseudoSd(){
        return Conv.copy(pseudoSd);
    }

    // Get the pseudo-estimates of the errors of the best estimates of the unknown parameters
    // Synonym of getPseudoSd().
    public double[] getPseudoErrors(){
        return Conv.copy(pseudoSd);
    }

    // Get the t-values of the best estimates
    public double[] getTvalues(){
        return Conv.copy(tValues);
    }

    // Get the p-values of the best estimates
    public double[] getPvalues(){
        return Conv.copy(pValues);
    }
|
---|
5318 |
|
---|
5319 |
|
---|
    // Get the inputted x values
    // Returns a copy of the internal x data matrix.
    public double[][] getXdata(){
        return Conv.copy(xData);
    }

    // Get the inputted y values
    // Returns a copy of the internal y data array.
    public double[] getYdata(){
        return Conv.copy(yData);
    }
|
---|
5329 |
|
---|
5330 | // Get the calculated y values
|
---|
5331 | public double[] getYcalc(){
|
---|
5332 | double[] temp = new double[this.nData];
|
---|
5333 | for(int i=0; i<this.nData; i++)temp[i]=this.yCalc[i];
|
---|
5334 | return temp;
|
---|
5335 | }
|
---|
5336 |
|
---|
5337 | // Get the unweighted residuals, y(experimental) - y(calculated)
|
---|
5338 | public double[] getResiduals(){
|
---|
5339 | double[] temp = new double[this.nData];
|
---|
5340 | for(int i=0; i<this.nData; i++)temp[i]=this.yData[i]-this.yCalc[i];
|
---|
5341 | return temp;
|
---|
5342 | }
|
---|
5343 |
|
---|
5344 | // Get the weighted residuals, (y(experimental) - y(calculated))/weight
|
---|
5345 | public double[] getWeightedResiduals(){
|
---|
5346 | double[] temp = new double[this.nData];
|
---|
5347 | for(int i=0; i<this.nData; i++)temp[i]=(this.yData[i]-this.yCalc[i])/weight[i];
|
---|
5348 | return temp;
|
---|
5349 | }
|
---|
5350 |
|
---|
    // Get the unweighted sum of squares of the residuals
    public double getSumOfSquares(){
        return this.sumOfSquaresError;
    }

    // Synonym of getSumOfSquares().
    public double getSumOfUnweightedResidualSquares(){
        return this.sumOfSquaresError;
    }

    // Get the weighted sum of squares of the residuals
    // returns sum of squares if no weights have been entered
    public double getSumOfWeightedResidualSquares(){
        return this.chiSquare;
    }

    // Get the chi square estimate
    // returns sum of squares if no weights have been entered
    public double getChiSquare(){
        return this.chiSquare;
    }

    // Get the reduced chi square estimate
    // Returns reduced sum of squares if no weights have been entered
    public double getReducedChiSquare(){
        return this.reducedChiSquare;
    }

    // Get the total weighted sum of squares
    public double getTotalSumOfWeightedSquares(){
        return this.sumOfSquaresTotal;
    }
|
---|
5382 |
|
---|
    // Get the regression weighted sum of squares
    public double getRegressionSumOfWeightedSquares(){
        return this.sumOfSquaresRegrn;
    }

    // Get the Coefficient of Determination
    public double getCoefficientOfDetermination(){
        return this.multR;
    }

    // Get the Coefficient of Determination
    // Retained for backward compatibility
    public double getSampleR(){
        return this.multR;
    }

    // Get the Adjusted Coefficient of Determination
    public double getAdjustedCoefficientOfDetermination(){
        return this.adjustedR;
    }

    // Get the Coefficient of Determination F-ratio
    public double getCoeffDeterminationFratio(){
        return this.multipleF;
    }

    // Get the Coefficient of Determination F-ratio probability
    public double getCoeffDeterminationFratioProb(){
        return this.multipleFprob;
    }
|
---|
5413 |
|
---|
5414 |
|
---|
5415 | // Get the covariance matrix
|
---|
5416 | public double[][] getCovMatrix(){
|
---|
5417 | return this.covar;
|
---|
5418 | }
|
---|
5419 |
|
---|
5420 | // Get the correlation coefficient matrix
|
---|
5421 | public double[][] getCorrCoeffMatrix(){
|
---|
5422 | return this.corrCoeff;
|
---|
5423 | }
|
---|
5424 |
|
---|
    // Get the number of iterations in nonlinear regression
    public int getNiter(){
        return this.nIter;
    }


    // Set the maximum number of iterations allowed in nonlinear regression
    public void setNmax(int nmax){
        this.nMax = nmax;
    }

    // Get the maximum number of iterations allowed in nonlinear regression
    public int getNmax(){
        return this.nMax;
    }

    // Get the number of restarts in nonlinear regression
    public int getNrestarts(){
        return this.kRestart;
    }

    // Set the maximum number of restarts allowed in nonlinear regression
    // Stored in the konvge field (also read by getNrestartsMax).
    public void setNrestartsMax(int nrs){
        this.konvge = nrs;
    }

    // Get the maximum number of restarts allowed in nonlinear regression
    public int getNrestartsMax(){
        return this.konvge;
    }
|
---|
5455 |
|
---|
    // Get the degrees of freedom
    public double getDegFree(){
        return (this.degreesOfFreedom);
    }

    // Reset the Nelder and Mead reflection coefficient [alpha]
    public void setNMreflect(double refl){
        this.rCoeff = refl;
    }

    // Get the Nelder and Mead reflection coefficient [alpha]
    public double getNMreflect(){
        return this.rCoeff;
    }

    // Reset the Nelder and Mead extension coefficient [beta]
    public void setNMextend(double ext){
        this.eCoeff = ext;
    }

    // Get the Nelder and Mead extension coefficient [beta]
    public double getNMextend(){
        return this.eCoeff;
    }

    // Reset the Nelder and Mead contraction coefficient [gamma]
    public void setNMcontract(double con){
        this.cCoeff = con;
    }

    // Get the Nelder and Mead contraction coefficient [gamma]
    public double getNMcontract(){
        return cCoeff;
    }
|
---|
5489 |
|
---|
    // Set the non-linear regression tolerance
    public void setTolerance(double tol){
        this.fTol = tol;
    }


    // Get the non-linear regression tolerance
    public double getTolerance(){
        return this.fTol;
    }

    // Get the non-linear regression pre and post minimum gradients
    // NOTE(review): returns the internal matrix by reference (other array
    // getters in this class return Conv.copy copies); confirm intended.
    public double[][] getGrad(){
        return this.grad;
    }

    // Set the non-linear regression fractional step size used in numerical differencing
    public void setDelta(double delta){
        this.delta = delta;
    }

    // Get the non-linear regression fractional step size used in numerical differencing
    public double getDelta(){
        return this.delta;
    }

    // Get the non-linear regression statistics Hessian matrix inversion status flag
    public boolean getInversionCheck(){
        return this.invertFlag;
    }

    // Get the non-linear regression statistics Hessian matrix inverse diagonal status flag
    public boolean getPosVarCheck(){
        return this.posVarFlag;
    }
|
---|
5525 |
|
---|
5526 |
|
---|
5527 | // Test of an additional terms {extra sum of squares]
|
---|
5528 | // return F-ratio, probability, order check and values provided in order used, as Vector
|
---|
5529 | public static Vector<Object> testOfAdditionalTerms(double chiSquareR, int nParametersR, double chiSquareF, int nParametersF, int nPoints){
|
---|
5530 | ArrayList<Object> res = Regression.testOfAdditionalTerms_ArrayList(chiSquareR, nParametersR, chiSquareF, nParametersF, nPoints);
|
---|
5531 | Vector<Object> ret = null;
|
---|
5532 | if(res!=null){
|
---|
5533 | int n = ret.size();
|
---|
5534 | ret = new Vector<Object>(n);
|
---|
5535 | for(int i=0; i<n; i++)ret.addElement(res.get(i));
|
---|
5536 | }
|
---|
5537 | return ret;
|
---|
5538 | }
|
---|
5539 |
|
---|
5540 | // Test of an additional terms {extra sum of squares]
|
---|
5541 | // return F-ratio, probability, order check and values provided in order used, as Vector
|
---|
5542 | public static Vector<Object> testOfAdditionalTerms_Vector(double chiSquareR, int nParametersR, double chiSquareF, int nParametersF, int nPoints){
|
---|
5543 | return Regression.testOfAdditionalTerms(chiSquareR, nParametersR, chiSquareF, nParametersF, nPoints);
|
---|
5544 | }
|
---|
5545 |
|
---|
5546 |
|
---|
5547 | // Test of an additional terms {extra sum of squares]
|
---|
5548 | // return F-ratio, probability, order check and values provided in order used, as ArrayList
|
---|
5549 | public static ArrayList<Object> testOfAdditionalTerms_ArrayList(double chiSquareR, int nParametersR, double chiSquareF, int nParametersF, int nPoints){
|
---|
5550 | int degFreedomR = nPoints - nParametersR;
|
---|
5551 | int degFreedomF = nPoints - nParametersF;
|
---|
5552 |
|
---|
5553 | // Check that model 2 has the lowest degrees of freedom
|
---|
5554 | boolean reversed = false;
|
---|
5555 | if(degFreedomR<degFreedomF){
|
---|
5556 | reversed = true;
|
---|
5557 | double holdD = chiSquareR;
|
---|
5558 | chiSquareR = chiSquareF;
|
---|
5559 | chiSquareF = holdD;
|
---|
5560 | int holdI = nParametersR;
|
---|
5561 | nParametersR = nParametersF;
|
---|
5562 | nParametersF = holdI;
|
---|
5563 | degFreedomR = nPoints - nParametersR;
|
---|
5564 | degFreedomF = nPoints - nParametersF;
|
---|
5565 | System.out.println("package flanagan.analysis; class Regression; method testAdditionalTerms");
|
---|
5566 | System.out.println("the order of the chi-squares has been reversed to give a second chi- square with the lowest degrees of freedom");
|
---|
5567 | }
|
---|
5568 | int degFreedomD = degFreedomR - degFreedomF;
|
---|
5569 |
|
---|
5570 | // F ratio
|
---|
5571 | double numer = (chiSquareR - chiSquareF)/degFreedomD;
|
---|
5572 | double denom = chiSquareF/degFreedomF;
|
---|
5573 | double fRatio = numer/denom;
|
---|
5574 |
|
---|
5575 | // Probability
|
---|
5576 | double fProb = 1.0D;
|
---|
5577 | if(chiSquareR>chiSquareF){
|
---|
5578 | fProb = Stat.fTestProb(fRatio, degFreedomD, degFreedomF);
|
---|
5579 | }
|
---|
5580 |
|
---|
5581 | // Return arraylist
|
---|
5582 | ArrayList<Object> arrayl = new ArrayList<Object>();
|
---|
5583 | arrayl.add(new Double(fRatio));
|
---|
5584 | arrayl.add(new Double(fProb));
|
---|
5585 | arrayl.add(new Boolean(reversed));
|
---|
5586 | arrayl.add(new Double(chiSquareR));
|
---|
5587 | arrayl.add(new Integer(nParametersR));
|
---|
5588 | arrayl.add(new Double(chiSquareF));
|
---|
5589 | arrayl.add(new Integer(nParametersF));
|
---|
5590 | arrayl.add(new Integer(nPoints));
|
---|
5591 |
|
---|
5592 | return arrayl;
|
---|
5593 | }
|
---|
5594 |
|
---|
5595 | // Test of an additional terms {extra sum of squares]
|
---|
5596 | // return F-ratio only
|
---|
5597 | public double testOfAdditionalTermsFratio(double chiSquareR, int nParametersR, double chiSquareF, int nParametersF, int nPoints){
|
---|
5598 | int degFreedomR = nPoints - nParametersR;
|
---|
5599 | int degFreedomF = nPoints - nParametersF;
|
---|
5600 |
|
---|
5601 | // Check that model 2 has the lowest degrees of freedom
|
---|
5602 | boolean reversed = false;
|
---|
5603 | if(degFreedomR<degFreedomF){
|
---|
5604 | reversed = true;
|
---|
5605 | double holdD = chiSquareR;
|
---|
5606 | chiSquareR = chiSquareF;
|
---|
5607 | chiSquareF = holdD;
|
---|
5608 | int holdI = nParametersR;
|
---|
5609 | nParametersR = nParametersF;
|
---|
5610 | nParametersF = holdI;
|
---|
5611 | degFreedomR = nPoints - nParametersR;
|
---|
5612 | degFreedomF = nPoints - nParametersF;
|
---|
5613 | System.out.println("package flanagan.analysis; class Regression; method testAdditionalTermsFratio");
|
---|
5614 | System.out.println("the order of the chi-squares has been reversed to give a second chi- square with the lowest degrees of freedom");
|
---|
5615 | }
|
---|
5616 | int degFreedomD = degFreedomR - degFreedomF;
|
---|
5617 |
|
---|
5618 | // F ratio
|
---|
5619 | double numer = (chiSquareR - chiSquareF)/degFreedomD;
|
---|
5620 | double denom = chiSquareF/degFreedomF;
|
---|
5621 | double fRatio = numer/denom;
|
---|
5622 |
|
---|
5623 | return fRatio;
|
---|
5624 | }
|
---|
5625 |
|
---|
5626 |
|
---|
5627 | // Test of an additional terms {extra sum of squares]
|
---|
5628 | // return F-distribution probablity only
|
---|
5629 | public double testOfAdditionalTermsFprobability(double chiSquareR, int nParametersR, double chiSquareF, int nParametersF, int nPoints){
|
---|
5630 | int degFreedomR = nPoints - nParametersR;
|
---|
5631 | int degFreedomF = nPoints - nParametersF;
|
---|
5632 |
|
---|
5633 | // Check that model 2 has the lowest degrees of freedom
|
---|
5634 | boolean reversed = false;
|
---|
5635 | if(degFreedomR<degFreedomF){
|
---|
5636 | reversed = true;
|
---|
5637 | double holdD = chiSquareR;
|
---|
5638 | chiSquareR = chiSquareF;
|
---|
5639 | chiSquareF = holdD;
|
---|
5640 | int holdI = nParametersR;
|
---|
5641 | nParametersR = nParametersF;
|
---|
5642 | nParametersF = holdI;
|
---|
5643 | degFreedomR = nPoints - nParametersR;
|
---|
5644 | degFreedomF = nPoints - nParametersF;
|
---|
5645 | System.out.println("package flanagan.analysis; class Regression; method testAdditionalTermsFprobability");
|
---|
5646 | System.out.println("the order of the chi-squares has been reversed to give a second chi- square with the lowest degrees of freedom");
|
---|
5647 | }
|
---|
5648 | int degFreedomD = degFreedomR - degFreedomF;
|
---|
5649 |
|
---|
5650 | // F ratio
|
---|
5651 | double numer = (chiSquareR - chiSquareF)/degFreedomD;
|
---|
5652 | double denom = chiSquareF/degFreedomF;
|
---|
5653 | double fRatio = numer/denom;
|
---|
5654 |
|
---|
5655 | // Probability
|
---|
5656 | double fProb = 1.0D;
|
---|
5657 | if(chiSquareR>chiSquareF){
|
---|
5658 | fProb = Stat.fTestProb(fRatio, degFreedomD, degFreedomF);
|
---|
5659 | }
|
---|
5660 |
|
---|
5661 | return fProb;
|
---|
5662 | }
|
---|
5663 |
|
---|
5664 | public double testOfAdditionalTermsFprobabilty(double chiSquareR, int nParametersR, double chiSquareF, int nParametersF, int nPoints){
|
---|
5665 | return testOfAdditionalTermsFprobability(chiSquareR, nParametersR, chiSquareF, nParametersF, nPoints);
|
---|
5666 | }
|
---|
5667 |
|
---|
5668 |
|
---|
    // FIT TO SPECIAL FUNCTIONS
    // Fit to a Poisson distribution
    // Initial estimates are derived from the data; results are not plotted.
    public void poisson(){
        this.userSupplied = false;      // estimates generated internally, not supplied by the caller
        this.fitPoisson(0);             // 0 = fit only, no print/plot
    }
|
---|
5675 |
|
---|
    // Fit to a Poisson distribution
    // As poisson() but also prints and plots the fit.
    public void poissonPlot(){
        this.userSupplied = false;      // estimates generated internally, not supplied by the caller
        this.fitPoisson(1);             // 1 = print and plot results
    }
|
---|
5681 |
|
---|
    // Fit data to a Poisson distribution by Nelder-Mead simplex minimisation.
    // plotFlag = 1 requests printing and plotting of the results.
    // Parameters fitted: [0] mean; [1] y scale factor (only used when scaleFlag is set).
    protected void fitPoisson(int plotFlag){
        if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
        this.lastMethod=6;              // method identifier recorded for later reporting
        this.linNonLin = false;
        this.zeroCheck = false;
        this.nTerms=2;
        // NOTE(review): this branch re-assigns the same value (2) — cf. fitGaussian,
        // where the no-scale branch reduces nTerms; possibly intended to be 1 here. Confirm.
        if(!this.scaleFlag)this.nTerms=2;
        this.degreesOfFreedom=this.nData - this.nTerms;
        if(this.degreesOfFreedom<1)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

        // Check all abscissae are integers (Poisson is a discrete distribution)
        for(int i=0; i<this.nData; i++){
            if(xData[0][i]-Math.floor(xData[0][i])!=0.0D)throw new IllegalArgumentException("all abscissae must be, mathematically, integer values");
        }

        // Calculate x value at peak y (estimate of the distribution mode)
        ArrayList<Object> ret1 = Regression.dataSign(yData);
        Double tempd = null;
        Integer tempi = null;
        tempi = (Integer)ret1.get(5);   // index of the maximum y value
        int peaki = tempi.intValue();
        double mean = xData[0][peaki];

        // Calculate peak value
        tempd = (Double)ret1.get(4);    // maximum y value
        double peak = tempd.doubleValue();

        // Fill arrays needed by the Simplex
        double[] start = new double[this.nTerms];
        double[] step = new double[this.nTerms];
        start[0] = mean;
        if(this.scaleFlag){
            // scale estimate = observed peak / Poisson pmf at the mode
            // (presumably Stat.logFactorial accepts a mathematically-integer double — confirm)
            start[1] = peak/(Math.exp(mean*Math.log(mean)-Stat.logFactorial(mean))*Math.exp(-mean));
        }
        step[0] = 0.1D*start[0];
        if(step[0]==0.0D){
            // mean estimate was zero - base the step on the x range instead
            ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
            Double tempdd = null;
            tempdd = (Double)ret0.get(2);
            double xmax = tempdd.doubleValue();
            if(xmax==0.0D){
                tempdd = (Double)ret0.get(0);
                xmax = tempdd.doubleValue();
            }
            step[0]=xmax*0.1D;
        }
        if(this.scaleFlag)step[1] = 0.1D*start[1];

        // Nelder and Mead Simplex Regression; parameter 1 constrained to be non-negative
        PoissonFunction f = new PoissonFunction();
        this.addConstraint(1,-1,0.0D);
        f.scaleOption = this.scaleFlag;
        f.scaleFactor = this.yScaleFactor;

        Object regFun2 = (Object) f;
        this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

        if(plotFlag==1){
            // Print results
            if(!this.supressPrint)this.print();
            // Plot results
            this.plotOpt=false;
            int flag = this.plotXY(f);
            if(flag!=-2 && !this.supressYYplot)this.plotYY();
        }
    }
|
---|
5748 |
|
---|
5749 |
|
---|
5750 | // FIT TO A NORMAL (GAUSSIAN) DISTRIBUTION
|
---|
5751 |
|
---|
    // Fit to a Gaussian
    // Initial estimates are derived from the data; results are not plotted.
    public void gaussian(){
        this.userSupplied = false;      // estimates generated internally
        this.fitGaussian(0);            // 0 = fit only, no print/plot
    }
|
---|
5757 |
|
---|
    // Fit to a normal distribution - synonym of gaussian()
    public void normal(){
        this.userSupplied = false;      // estimates generated internally
        this.fitGaussian(0);            // 0 = fit only, no print/plot
    }
|
---|
5762 |
|
---|
    // Fit to a Gaussian
    // As gaussian() but also prints and plots the fit.
    public void gaussianPlot(){
        this.userSupplied = false;      // estimates generated internally
        this.fitGaussian(1);            // 1 = print and plot results
    }
|
---|
5768 |
|
---|
    // Fit to a Gaussian - synonym of gaussianPlot()
    public void normalPlot(){
        this.userSupplied = false;      // estimates generated internally
        this.fitGaussian(1);            // 1 = print and plot results
    }
|
---|
5774 |
|
---|
5775 | // Fit data to a Gaussian (normal) probability function
|
---|
5776 | protected void fitGaussian(int plotFlag){
|
---|
5777 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
5778 | this.lastMethod=4;
|
---|
5779 | this.linNonLin = false;
|
---|
5780 | this.zeroCheck = false;
|
---|
5781 | this.nTerms=3;
|
---|
5782 | if(!this.scaleFlag)this.nTerms=2;
|
---|
5783 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
5784 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
5785 |
|
---|
5786 | // order data into ascending order of the abscissae
|
---|
5787 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
5788 |
|
---|
5789 | // check sign of y data
|
---|
5790 | Double tempd=null;
|
---|
5791 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
5792 | tempd = (Double)retY.get(4);
|
---|
5793 | double yPeak = tempd.doubleValue();
|
---|
5794 | boolean yFlag = false;
|
---|
5795 | if(yPeak<0.0D){
|
---|
5796 | System.out.println("Regression.fitGaussian(): This implementation of the Gaussian distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
5797 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
5798 | for(int i =0; i<this.nData; i++){
|
---|
5799 | yData[i] = -yData[i];
|
---|
5800 | }
|
---|
5801 | retY = Regression.dataSign(yData);
|
---|
5802 | yFlag=true;
|
---|
5803 | }
|
---|
5804 |
|
---|
5805 | // Calculate x value at peak y (estimate of the Gaussian mean)
|
---|
5806 | ArrayList<Object> ret1 = Regression.dataSign(yData);
|
---|
5807 | Integer tempi = null;
|
---|
5808 | tempi = (Integer)ret1.get(5);
|
---|
5809 | int peaki = tempi.intValue();
|
---|
5810 | double mean = xData[0][peaki];
|
---|
5811 |
|
---|
5812 | // Calculate an estimate of the sd
|
---|
5813 | double sd = Math.sqrt(2.0D)*halfWidth(xData[0], yData);
|
---|
5814 |
|
---|
5815 | // Calculate estimate of y scale
|
---|
5816 | tempd = (Double)ret1.get(4);
|
---|
5817 | double ym = tempd.doubleValue();
|
---|
5818 | ym=ym*sd*Math.sqrt(2.0D*Math.PI);
|
---|
5819 |
|
---|
5820 | // Fill arrays needed by the Simplex
|
---|
5821 | double[] start = new double[this.nTerms];
|
---|
5822 | double[] step = new double[this.nTerms];
|
---|
5823 | start[0] = mean;
|
---|
5824 | start[1] = sd;
|
---|
5825 | if(this.scaleFlag){
|
---|
5826 | start[2] = ym;
|
---|
5827 | }
|
---|
5828 | step[0] = 0.1D*sd;
|
---|
5829 | step[1] = 0.1D*start[1];
|
---|
5830 | if(step[1]==0.0D){
|
---|
5831 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
5832 | Double tempdd = null;
|
---|
5833 | tempdd = (Double)ret0.get(2);
|
---|
5834 | double xmax = tempdd.doubleValue();
|
---|
5835 | if(xmax==0.0D){
|
---|
5836 | tempdd = (Double)ret0.get(0);
|
---|
5837 | xmax = tempdd.doubleValue();
|
---|
5838 | }
|
---|
5839 | step[1]=xmax*0.1D;
|
---|
5840 | }
|
---|
5841 | if(this.scaleFlag)step[2] = 0.1D*start[2];
|
---|
5842 |
|
---|
5843 | // Nelder and Mead Simplex Regression
|
---|
5844 | GaussianFunction f = new GaussianFunction();
|
---|
5845 | this.addConstraint(1,-1, 0.0D);
|
---|
5846 | f.scaleOption = this.scaleFlag;
|
---|
5847 | f.scaleFactor = this.yScaleFactor;
|
---|
5848 |
|
---|
5849 | Object regFun2 = (Object)f;
|
---|
5850 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
5851 |
|
---|
5852 | if(plotFlag==1){
|
---|
5853 | // Print results
|
---|
5854 | if(!this.supressPrint)this.print();
|
---|
5855 |
|
---|
5856 | // Plot results
|
---|
5857 | int flag = this.plotXY(f);
|
---|
5858 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
5859 | }
|
---|
5860 |
|
---|
5861 | if(yFlag){
|
---|
5862 | // restore data
|
---|
5863 | for(int i=0; i<this.nData-1; i++){
|
---|
5864 | this.yData[i]=-this.yData[i];
|
---|
5865 | }
|
---|
5866 | }
|
---|
5867 |
|
---|
5868 | }
|
---|
5869 |
|
---|
    // Fit data to a Gaussian (normal) probability function
    // with option to fix some of the parameters
    // parameter order - mean, sd, scale factor
    // fixed[i] = true holds parameter i at initialEstimates[i]; no plot is produced.
    public void gaussian(double[] initialEstimates, boolean[] fixed){
        this.userSupplied = true;       // initial estimates supplied by the caller
        this.fitGaussianFixed(initialEstimates, fixed, 0);  // 0 = fit only, no print/plot
    }
|
---|
5877 |
|
---|
    // Fit to a Gaussian - synonym of gaussian(initialEstimates, fixed)
    // with option to fix some of the parameters
    // parameter order - mean, sd, scale factor
    public void normal(double[] initialEstimates, boolean[] fixed){
        this.userSupplied = true;       // initial estimates supplied by the caller
        this.fitGaussianFixed(initialEstimates, fixed, 0);  // 0 = fit only, no print/plot
    }
|
---|
5885 |
|
---|
    // Fit to a Gaussian
    // with option to fix some of the parameters
    // parameter order - mean, sd, scale factor
    // As gaussian(initialEstimates, fixed) but also prints and plots the fit.
    public void gaussianPlot(double[] initialEstimates, boolean[] fixed){
        this.userSupplied = true;       // initial estimates supplied by the caller
        this.fitGaussianFixed(initialEstimates, fixed, 1);  // 1 = print and plot results
    }
|
---|
5893 |
|
---|
    // Fit to a Gaussian - synonym of gaussianPlot(initialEstimates, fixed)
    // with option to fix some of the parameters
    // parameter order - mean, sd, scale factor
    public void normalPlot(double[] initialEstimates, boolean[] fixed){
        this.userSupplied = true;       // initial estimates supplied by the caller
        this.fitGaussianFixed(initialEstimates, fixed, 1);  // 1 = print and plot results
    }
|
---|
5901 |
|
---|
5902 |
|
---|
5903 | // Fit data to a Gaussian (normal) probability function
|
---|
5904 | // with option to fix some of the parameters
|
---|
5905 | // parameter order - mean, sd, scale factor
|
---|
5906 | protected void fitGaussianFixed(double[] initialEstimates, boolean[] fixed, int plotFlag){
|
---|
5907 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
5908 | this.lastMethod=38;
|
---|
5909 | this.values = initialEstimates;
|
---|
5910 | this.fixed = fixed;
|
---|
5911 | this.scaleFlag=true;
|
---|
5912 | this.linNonLin = false;
|
---|
5913 | this.zeroCheck = false;
|
---|
5914 | this.nTerms=3;
|
---|
5915 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
5916 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
5917 |
|
---|
5918 | // order data into ascending order of the abscissae
|
---|
5919 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
5920 |
|
---|
5921 | // check sign of y data
|
---|
5922 | Double tempd=null;
|
---|
5923 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
5924 | tempd = (Double)retY.get(4);
|
---|
5925 | double yPeak = tempd.doubleValue();
|
---|
5926 | boolean yFlag = false;
|
---|
5927 | if(yPeak<0.0D){
|
---|
5928 | System.out.println("Regression.fitGaussian(): This implementation of the Gaussian distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
5929 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
5930 | for(int i =0; i<this.nData; i++){
|
---|
5931 | yData[i] = -yData[i];
|
---|
5932 | }
|
---|
5933 | retY = Regression.dataSign(yData);
|
---|
5934 | yFlag=true;
|
---|
5935 | }
|
---|
5936 |
|
---|
5937 | // Create instance of GaussianFunctionFixed
|
---|
5938 | GaussianFunctionFixed f = new GaussianFunctionFixed();
|
---|
5939 | f.fixed = fixed;
|
---|
5940 | f.param = initialEstimates;
|
---|
5941 |
|
---|
5942 | // Determine unknowns
|
---|
5943 | int nT = this.nTerms;
|
---|
5944 | for(int i=0; i<this.nTerms; i++)if(fixed[i])nT--;
|
---|
5945 | if(nT==0){
|
---|
5946 | if(plotFlag==0){
|
---|
5947 | throw new IllegalArgumentException("At least one parameter must be available for variation by the Regression procedure or GauasianPlot should have been called and not Gaussian");
|
---|
5948 | }
|
---|
5949 | else{
|
---|
5950 | plotFlag = 3;
|
---|
5951 | }
|
---|
5952 | }
|
---|
5953 |
|
---|
5954 | double[] start = new double[nT];
|
---|
5955 | double[] step = new double[nT];
|
---|
5956 | boolean[] constraint = new boolean[nT];
|
---|
5957 |
|
---|
5958 | // Fill arrays needed by the Simplex
|
---|
5959 | double xMin = Fmath.minimum(xData[0]);
|
---|
5960 | double xMax = Fmath.maximum(xData[0]);
|
---|
5961 | double yMax = Fmath.maximum(yData);
|
---|
5962 | if(initialEstimates[2]==0.0D){
|
---|
5963 | if(fixed[2]){
|
---|
5964 | throw new IllegalArgumentException("Scale factor has been fixed at zero");
|
---|
5965 | }
|
---|
5966 | else{
|
---|
5967 | initialEstimates[2] = yMax;
|
---|
5968 | }
|
---|
5969 | }
|
---|
5970 | int ii = 0;
|
---|
5971 | for(int i=0; i<this.nTerms; i++){
|
---|
5972 | if(!fixed[i]){
|
---|
5973 | start[ii] = initialEstimates[i];
|
---|
5974 | step[ii] = start[ii]*0.1D;
|
---|
5975 | if(step[ii]==0.0D)step[ii] = (xMax - xMin)*0.1D;
|
---|
5976 | constraint[ii] = false;
|
---|
5977 | if(i==1)constraint[ii] = true;
|
---|
5978 | ii++;
|
---|
5979 | }
|
---|
5980 | }
|
---|
5981 | this.nTerms = nT;
|
---|
5982 |
|
---|
5983 | // Nelder and Mead Simplex Regression
|
---|
5984 | for(int i=0; i<this.nTerms; i++){
|
---|
5985 | if(constraint[i])this.addConstraint(i,-1, 0.0D);
|
---|
5986 | }
|
---|
5987 | Object regFun2 = (Object)f;
|
---|
5988 | if(plotFlag!=3)this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
5989 |
|
---|
5990 | if(plotFlag==1){
|
---|
5991 | // Print results
|
---|
5992 | if(!this.supressPrint)this.print();
|
---|
5993 |
|
---|
5994 | // Plot results
|
---|
5995 | int flag = this.plotXY(f);
|
---|
5996 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
5997 | }
|
---|
5998 |
|
---|
5999 | if(plotFlag==3){
|
---|
6000 | // Plot results
|
---|
6001 | int flag = this.plotXYfixed(regFun2, "Gaussian distribution - all parameters fixed");
|
---|
6002 | }
|
---|
6003 |
|
---|
6004 | if(yFlag){
|
---|
6005 | // restore data
|
---|
6006 | for(int i=0; i<this.nData-1; i++){
|
---|
6007 | this.yData[i]=-this.yData[i];
|
---|
6008 | }
|
---|
6009 | }
|
---|
6010 | }
|
---|
6011 |
|
---|
6012 | // Fit to multiple Gaussians
|
---|
6013 | public void multipleGaussiansPlot(int nGaussians, double[] initMeans, double[] initSDs, double[] initFracts){
|
---|
6014 | if(initMeans.length!=nGaussians)throw new IllegalArgumentException("length of initial means array, " + initMeans.length + ", does not equal the number of Gaussians, " + nGaussians);
|
---|
6015 | if(initSDs.length!=nGaussians)throw new IllegalArgumentException("length of initial standard deviations array, " + initSDs.length + ", does not equal the number of Gaussians, " + nGaussians);
|
---|
6016 | if(initFracts.length!=nGaussians)throw new IllegalArgumentException("length of initial fractional weights array, " + initFracts.length + ", does not equal the number of Gaussians, " + nGaussians);
|
---|
6017 | double sum = 0.0;
|
---|
6018 | for(int i=0; i<nGaussians; i++)sum += initFracts[i];
|
---|
6019 | if(sum!=1.0){
|
---|
6020 | System.out.println("Regression method multipleGaussiansPlot: the sum of the initial estimates of the fractional weights, " + sum + ", does not equal 1.0");
|
---|
6021 | System.out.println("Program continued using the supplied fractional weights");
|
---|
6022 | }
|
---|
6023 | this.fitMultipleGaussians(nGaussians, initMeans, initSDs, initFracts, 1);
|
---|
6024 | }
|
---|
6025 |
|
---|
6026 |
|
---|
6027 | // Fit data to multiple Gaussian (normal) probability functions
|
---|
6028 | protected void fitMultipleGaussians(int nGaussians, double[] initMeans, double[] initSDs, double[] initFracts, int plotFlag){
|
---|
6029 | this.nGaussians = nGaussians;
|
---|
6030 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
6031 | this.lastMethod=49;
|
---|
6032 | this.linNonLin = false;
|
---|
6033 | this.zeroCheck = false;
|
---|
6034 | this.nTerms=3*this.nGaussians;
|
---|
6035 | boolean scaleFlagHold = this.scaleFlag;
|
---|
6036 | this.scaleFlag = false;
|
---|
6037 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
6038 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
6039 | int nMaxHold = this.nMax;
|
---|
6040 | if(this.nMax<10000)this.nMax = 10000;
|
---|
6041 |
|
---|
6042 | // order data into ascending order of the abscissae
|
---|
6043 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
6044 |
|
---|
6045 | // check sign of y data
|
---|
6046 | Double tempd=null;
|
---|
6047 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
6048 | tempd = (Double)retY.get(4);
|
---|
6049 | double yPeak = tempd.doubleValue();
|
---|
6050 | boolean yFlag = false;
|
---|
6051 | if(yPeak<0.0D){
|
---|
6052 | System.out.println("Regression.fitGaussian(): This implementation of the Gaussian distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
6053 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
6054 | for(int i =0; i<this.nData; i++){
|
---|
6055 | yData[i] = -yData[i];
|
---|
6056 | }
|
---|
6057 | retY = Regression.dataSign(yData);
|
---|
6058 | yFlag=true;
|
---|
6059 | }
|
---|
6060 |
|
---|
6061 | // Calculate x value at peak y (estimate of the Gaussian mean)
|
---|
6062 | ArrayList<Object> ret1 = Regression.dataSign(yData);
|
---|
6063 | Integer tempi = null;
|
---|
6064 | tempi = (Integer)ret1.get(5);
|
---|
6065 | int peaki = tempi.intValue();
|
---|
6066 | double mean = xData[0][peaki];
|
---|
6067 |
|
---|
6068 | // Calculate an estimate of the sd
|
---|
6069 | double sd = Math.sqrt(2.0D)*halfWidth(xData[0], yData);
|
---|
6070 |
|
---|
6071 | // Calculate estimate of y scale
|
---|
6072 | tempd = (Double)ret1.get(4);
|
---|
6073 | double ym = tempd.doubleValue();
|
---|
6074 |
|
---|
6075 |
|
---|
6076 | // Fill arrays needed by the Simplex
|
---|
6077 | double[] start = new double[this.nTerms];
|
---|
6078 | double[] step = new double[this.nTerms];
|
---|
6079 | int counter = 0;
|
---|
6080 | for(int i=0; i<nGaussians; i++){
|
---|
6081 | start[counter] = initMeans[i];
|
---|
6082 | step[counter] = Math.abs(0.1D*start[counter]);
|
---|
6083 | start[counter+1] = initSDs[i];
|
---|
6084 | step[counter+1] = Math.abs(0.1D*start[counter+1]);
|
---|
6085 | if(step[counter+1]==0.0D){
|
---|
6086 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
6087 | Double tempdd = null;
|
---|
6088 | tempdd = (Double)ret0.get(2);
|
---|
6089 | double xmax = tempdd.doubleValue();
|
---|
6090 | if(xmax==0.0D){
|
---|
6091 | tempdd = (Double)ret0.get(0);
|
---|
6092 | xmax = tempdd.doubleValue();
|
---|
6093 | }
|
---|
6094 | step[counter+1]=Math.abs(xmax*0.1D);
|
---|
6095 | }
|
---|
6096 | start[counter+2] = initFracts[i]*Math.sqrt(2.0*Math.PI)*start[counter+1]*ym;
|
---|
6097 | step[counter+2] = Math.abs(0.1D*start[counter+2]);
|
---|
6098 | counter += 3;
|
---|
6099 | }
|
---|
6100 |
|
---|
6101 | // Nelder and Mead Simplex Regression
|
---|
6102 | MultipleGaussianFunction f = new MultipleGaussianFunction();
|
---|
6103 |
|
---|
6104 | f.scaleOption = this.scaleFlag;
|
---|
6105 | double ysf = this.yScaleFactor;
|
---|
6106 | if(!this.scaleFlag)ysf = 1.0;
|
---|
6107 | f.scaleFactor = ysf;
|
---|
6108 | f.nGaussians = this.nGaussians;
|
---|
6109 |
|
---|
6110 | // Add constraints
|
---|
6111 | for(int i=0; i<this.nGaussians; i++){
|
---|
6112 | this.addConstraint(3*i+1,-1, 0.0D);
|
---|
6113 | this.addConstraint(3*i+2,-1, 0.0D);
|
---|
6114 | }
|
---|
6115 |
|
---|
6116 | Object regFun2 = (Object)f;
|
---|
6117 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
6118 |
|
---|
6119 | this.multGaussFract = new double[this.nGaussians];
|
---|
6120 | this.multGaussFractErrors = new double[this.nGaussians];
|
---|
6121 | this.multGaussCoeffVar = new double[this.nGaussians];
|
---|
6122 | this.multGaussTvalue = new double[this.nGaussians];
|
---|
6123 | this.multGaussPvalue= new double[this.nGaussians];
|
---|
6124 | for(int i=0; i<nGaussians; i++){
|
---|
6125 | this.multGaussFractErrors[i] = Double.NaN;
|
---|
6126 | this.multGaussCoeffVar[i] = Double.NaN;
|
---|
6127 | this.multGaussTvalue[i] = Double.NaN;
|
---|
6128 | this.multGaussPvalue[i] = Double.NaN;
|
---|
6129 | }
|
---|
6130 | this.multGaussScaleError = Double.NaN;
|
---|
6131 | this.multGaussScaleCoeffVar = Double.NaN;
|
---|
6132 | this.multGaussScaleTvalue = Double.NaN;
|
---|
6133 | this.multGaussScalePvalue = Double.NaN;
|
---|
6134 | this.multGaussScaleTvalue = Double.NaN;
|
---|
6135 | this.multGaussScalePvalue = Double.NaN;
|
---|
6136 |
|
---|
6137 | if(this.invertFlag){
|
---|
6138 | ErrorProp[] multGaussErrorProp = new ErrorProp[this.nGaussians];
|
---|
6139 | ErrorProp sum = new ErrorProp(0.0, 0.0);
|
---|
6140 | for(int i=0; i<nGaussians; i++){
|
---|
6141 | multGaussErrorProp[i] = new ErrorProp(this.best[3*i+2], this.bestSd[3*i+2]);
|
---|
6142 | sum = sum.plus(multGaussErrorProp[i]);
|
---|
6143 | }
|
---|
6144 | ErrorProp epScale = new ErrorProp(0.0, 0.0);
|
---|
6145 | for(int i=0; i<nGaussians; i++){
|
---|
6146 | ErrorProp epFract = multGaussErrorProp[i].over(sum);
|
---|
6147 | this.multGaussFract[i] = (epFract).getValue();
|
---|
6148 | this.multGaussFractErrors[i] = (epFract).getError();
|
---|
6149 | epScale = epScale.plus(multGaussErrorProp[i].over(epFract));
|
---|
6150 | this.multGaussCoeffVar[i] = 100.0*this.multGaussFractErrors[i]/this.multGaussFract[i];
|
---|
6151 | this.multGaussTvalue[i] = this.multGaussFract[i]/this.multGaussFractErrors[i];
|
---|
6152 | double atv = Math.abs(this.multGaussTvalue[i]);
|
---|
6153 | if(atv!=atv){
|
---|
6154 | this.multGaussPvalue[i] = Double.NaN;
|
---|
6155 | }
|
---|
6156 | else{
|
---|
6157 | this.multGaussPvalue[i] = 1.0 - Stat.studentTcdf(-atv, atv, this.degreesOfFreedom);
|
---|
6158 | }
|
---|
6159 | }
|
---|
6160 | epScale = epScale.over(this.nGaussians);
|
---|
6161 | this.multGaussScale = epScale.getValue();
|
---|
6162 | this.multGaussScaleError = epScale.getError();
|
---|
6163 | this.multGaussScaleCoeffVar = 100.0*this.multGaussScaleError/this.multGaussScale;
|
---|
6164 | this.multGaussScaleTvalue = this.multGaussScale/this.multGaussScaleError;
|
---|
6165 | double atv = Math.abs(this.multGaussScaleTvalue);
|
---|
6166 | if(atv!=atv){
|
---|
6167 | this.multGaussScalePvalue = Double.NaN;
|
---|
6168 | }
|
---|
6169 | else{
|
---|
6170 | this.multGaussScalePvalue = 1.0 - Stat.studentTcdf(-atv, atv, this.degreesOfFreedom);
|
---|
6171 | }
|
---|
6172 | }
|
---|
6173 |
|
---|
6174 | else{
|
---|
6175 | double sum = 0.0;
|
---|
6176 | for(int i=0; i<nGaussians; i++){
|
---|
6177 | sum += best[3*i+2];
|
---|
6178 | }
|
---|
6179 | this.multGaussScale = 0.0;
|
---|
6180 | for(int i=0; i<nGaussians; i++){
|
---|
6181 | this.multGaussFract[i] = best[3*i+2]/sum;
|
---|
6182 | this.multGaussScale += this.multGaussFract[i];
|
---|
6183 | }
|
---|
6184 | this.multGaussScale /= this.nGaussians;
|
---|
6185 | }
|
---|
6186 |
|
---|
6187 | if(plotFlag==1){
|
---|
6188 | // Print results
|
---|
6189 | if(!this.supressPrint)this.print();
|
---|
6190 |
|
---|
6191 | // Plot results
|
---|
6192 | int flag = this.plotXY(f);
|
---|
6193 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
6194 | }
|
---|
6195 |
|
---|
6196 | if(yFlag){
|
---|
6197 | // restore data
|
---|
6198 | for(int i=0; i<this.nData-1; i++){
|
---|
6199 | this.yData[i]=-this.yData[i];
|
---|
6200 | }
|
---|
6201 | }
|
---|
6202 | this.nMax = nMaxHold;
|
---|
6203 | this.scaleFlag = scaleFlagHold;
|
---|
6204 |
|
---|
6205 | }
|
---|
6206 |
|
---|
6207 |
|
---|
6208 | // FIT TO LOG-NORMAL DISTRIBUTIONS (TWO AND THREE PARAMETERS)
|
---|
6209 |
|
---|
    // TWO PARAMETER LOG-NORMAL DISTRIBUTION
    // Fit to a two parameter log-normal distribution; results are not plotted.
    public void logNormal(){
        this.fitLogNormalTwoPar(0);     // 0 = fit only, no print/plot
    }
|
---|
6215 |
|
---|
    // Fit to a two parameter log-normal distribution - synonym of logNormal()
    public void logNormalTwoPar(){
        this.fitLogNormalTwoPar(0);     // 0 = fit only, no print/plot
    }
|
---|
6219 |
|
---|
    // Fit to a two parameter log-normal distribution and plot result
    public void logNormalPlot(){
        this.fitLogNormalTwoPar(1);     // 1 = print and plot results
    }
|
---|
6224 |
|
---|
    // Fit to a two parameter log-normal distribution and plot result - synonym of logNormalPlot()
    public void logNormalTwoParPlot(){
        this.fitLogNormalTwoPar(1);     // 1 = print and plot results
    }
|
---|
6228 |
|
---|
6229 | // Fit data to a two parameterlog-normal probability function
|
---|
6230 | protected void fitLogNormalTwoPar(int plotFlag){
|
---|
6231 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
6232 | this.lastMethod=36;
|
---|
6233 | this.userSupplied = false;
|
---|
6234 | this.linNonLin = false;
|
---|
6235 | this.zeroCheck = false;
|
---|
6236 | this.nTerms=3;
|
---|
6237 | if(!this.scaleFlag)this.nTerms=2;
|
---|
6238 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
6239 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
6240 |
|
---|
6241 | // order data into ascending order of the abscissae
|
---|
6242 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
6243 |
|
---|
6244 | // check sign of y data
|
---|
6245 | Double tempd=null;
|
---|
6246 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
6247 | tempd = (Double)retY.get(4);
|
---|
6248 | double yPeak = tempd.doubleValue();
|
---|
6249 | boolean yFlag = false;
|
---|
6250 | if(yPeak<0.0D){
|
---|
6251 | System.out.println("Regression.fitLogNormalTwoPar(): This implementation of the two parameter log-nprmal distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
6252 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
6253 | for(int i =0; i<this.nData; i++){
|
---|
6254 | yData[i] = -yData[i];
|
---|
6255 | }
|
---|
6256 | retY = Regression.dataSign(yData);
|
---|
6257 | yFlag=true;
|
---|
6258 | }
|
---|
6259 |
|
---|
6260 | // Calculate x value at peak y
|
---|
6261 | ArrayList<Object> ret1 = Regression.dataSign(yData);
|
---|
6262 | Integer tempi = null;
|
---|
6263 | tempi = (Integer)ret1.get(5);
|
---|
6264 | int peaki = tempi.intValue();
|
---|
6265 | double mean = xData[0][peaki];
|
---|
6266 |
|
---|
6267 | // Calculate an estimate of the mu
|
---|
6268 | double mu = 0.0D;
|
---|
6269 | for(int i=0; i<this.nData; i++)mu += Math.log(xData[0][i]);
|
---|
6270 | mu /= this.nData;
|
---|
6271 |
|
---|
6272 | // Calculate estimate of sigma
|
---|
6273 | double sigma = 0.0D;
|
---|
6274 | for(int i=0; i<this.nData; i++)sigma += Fmath.square(Math.log(xData[0][i]) - mu);
|
---|
6275 | sigma = Math.sqrt(sigma/this.nData);
|
---|
6276 |
|
---|
6277 | // Calculate estimate of y scale
|
---|
6278 | tempd = (Double)ret1.get(4);
|
---|
6279 | double ym = tempd.doubleValue();
|
---|
6280 | ym=ym*Math.exp(mu - sigma*sigma/2);
|
---|
6281 |
|
---|
6282 | // Fill arrays needed by the Simplex
|
---|
6283 | double[] start = new double[this.nTerms];
|
---|
6284 | double[] step = new double[this.nTerms];
|
---|
6285 | start[0] = mu;
|
---|
6286 | start[1] = sigma;
|
---|
6287 | if(this.scaleFlag){
|
---|
6288 | start[2] = ym;
|
---|
6289 | }
|
---|
6290 | step[0] = 0.1D*start[0];
|
---|
6291 | step[1] = 0.1D*start[1];
|
---|
6292 | if(step[0]==0.0D){
|
---|
6293 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
6294 | Double tempdd = null;
|
---|
6295 | tempdd = (Double)ret0.get(2);
|
---|
6296 | double xmax = tempdd.doubleValue();
|
---|
6297 | if(xmax==0.0D){
|
---|
6298 | tempdd = (Double)ret0.get(0);
|
---|
6299 | xmax = tempdd.doubleValue();
|
---|
6300 | }
|
---|
6301 | step[0]=xmax*0.1D;
|
---|
6302 | }
|
---|
6303 | if(step[0]==0.0D){
|
---|
6304 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
6305 | Double tempdd = null;
|
---|
6306 | tempdd = (Double)ret0.get(2);
|
---|
6307 | double xmax = tempdd.doubleValue();
|
---|
6308 | if(xmax==0.0D){
|
---|
6309 | tempdd = (Double)ret0.get(0);
|
---|
6310 | xmax = tempdd.doubleValue();
|
---|
6311 | }
|
---|
6312 | step[1]=xmax*0.1D;
|
---|
6313 | }
|
---|
6314 | if(this.scaleFlag)step[2] = 0.1D*start[2];
|
---|
6315 |
|
---|
6316 | // Nelder and Mead Simplex Regression
|
---|
6317 | LogNormalTwoParFunction f = new LogNormalTwoParFunction();
|
---|
6318 | this.addConstraint(1,-1,0.0D);
|
---|
6319 | f.scaleOption = this.scaleFlag;
|
---|
6320 | f.scaleFactor = this.yScaleFactor;
|
---|
6321 | Object regFun2 = (Object)f;
|
---|
6322 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
6323 |
|
---|
6324 | if(plotFlag==1){
|
---|
6325 | // Print results
|
---|
6326 | if(!this.supressPrint)this.print();
|
---|
6327 |
|
---|
6328 | // Plot results
|
---|
6329 | int flag = this.plotXY(f);
|
---|
6330 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
6331 | }
|
---|
6332 |
|
---|
6333 | if(yFlag){
|
---|
6334 | // restore data
|
---|
6335 | for(int i=0; i<this.nData-1; i++){
|
---|
6336 | this.yData[i]=-this.yData[i];
|
---|
6337 | }
|
---|
6338 | }
|
---|
6339 | }
|
---|
6340 |
|
---|
6341 |
|
---|
6342 | // THREE PARAMETER LOG-NORMAL DISTRIBUTION
|
---|
6343 | // Fit to a three parameter log-normal distribution
|
---|
6344 | public void logNormalThreePar(){
|
---|
6345 | this.fitLogNormalThreePar(0);
|
---|
6346 | }
|
---|
6347 |
|
---|
6348 | // Fit to a three parameter log-normal distribution and plot result
|
---|
6349 | public void logNormalThreeParPlot(){
|
---|
6350 | this.fitLogNormalThreePar(1);
|
---|
6351 | }
|
---|
6352 |
|
---|
6353 | // Fit data to a three parameter log-normal probability function
|
---|
6354 | protected void fitLogNormalThreePar(int plotFlag){
|
---|
6355 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
6356 | this.lastMethod=37;
|
---|
6357 | this.userSupplied = false;
|
---|
6358 | this.linNonLin = false;
|
---|
6359 | this.zeroCheck = false;
|
---|
6360 | this.nTerms=4;
|
---|
6361 | if(!this.scaleFlag)this.nTerms=3;
|
---|
6362 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
6363 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
6364 |
|
---|
6365 | // order data into ascending order of the abscissae
|
---|
6366 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
6367 |
|
---|
6368 | // check sign of y data
|
---|
6369 | Double tempd=null;
|
---|
6370 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
6371 | tempd = (Double)retY.get(4);
|
---|
6372 | double yPeak = tempd.doubleValue();
|
---|
6373 | boolean yFlag = false;
|
---|
6374 | if(yPeak<0.0D){
|
---|
6375 | System.out.println("Regression.fitLogNormalThreePar(): This implementation of the three parameter log-normal distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
6376 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
6377 | for(int i =0; i<this.nData; i++){
|
---|
6378 | yData[i] = -yData[i];
|
---|
6379 | }
|
---|
6380 | retY = Regression.dataSign(yData);
|
---|
6381 | yFlag=true;
|
---|
6382 | }
|
---|
6383 |
|
---|
6384 | // Calculate x value at peak y
|
---|
6385 | ArrayList<Object> ret1 = Regression.dataSign(yData);
|
---|
6386 | Integer tempi = null;
|
---|
6387 | tempi = (Integer)ret1.get(5);
|
---|
6388 | int peaki = tempi.intValue();
|
---|
6389 | double mean = xData[0][peaki];
|
---|
6390 |
|
---|
6391 | // Calculate an estimate of the gamma
|
---|
6392 | double gamma = 0.0D;
|
---|
6393 | for(int i=0; i<this.nData; i++)gamma += xData[0][i];
|
---|
6394 | gamma /= this.nData;
|
---|
6395 |
|
---|
6396 | // Calculate estimate of beta
|
---|
6397 | double beta = 0.0D;
|
---|
6398 | for(int i=0; i<this.nData; i++)beta += Fmath.square(Math.log(xData[0][i]) - Math.log(gamma));
|
---|
6399 | beta = Math.sqrt(beta/this.nData);
|
---|
6400 |
|
---|
6401 | // Calculate estimate of alpha
|
---|
6402 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
6403 | Double tempdd = null;
|
---|
6404 | tempdd = (Double)ret0.get(0);
|
---|
6405 | double xmin = tempdd.doubleValue();
|
---|
6406 | tempdd = (Double)ret0.get(2);
|
---|
6407 | double xmax = tempdd.doubleValue();
|
---|
6408 | double alpha = xmin - (xmax - xmin)/100.0D;;
|
---|
6409 | if(xmin==0.0D)alpha -= (xmax - xmin)/100.0D;
|
---|
6410 |
|
---|
6411 |
|
---|
6412 | // Calculate estimate of y scale
|
---|
6413 | tempd = (Double)ret1.get(4);
|
---|
6414 | double ym = tempd.doubleValue();
|
---|
6415 | ym=ym*(gamma+alpha)*Math.exp(- beta*beta/2);
|
---|
6416 |
|
---|
6417 | // Fill arrays needed by the Simplex
|
---|
6418 | double[] start = new double[this.nTerms];
|
---|
6419 | double[] step = new double[this.nTerms];
|
---|
6420 | start[0] = alpha;
|
---|
6421 | start[1] = beta;
|
---|
6422 | start[2] = gamma;
|
---|
6423 | if(this.scaleFlag){
|
---|
6424 | start[3] = ym;
|
---|
6425 | }
|
---|
6426 | step[0] = 0.1D*start[0];
|
---|
6427 | step[1] = 0.1D*start[1];
|
---|
6428 | step[2] = 0.1D*start[2];
|
---|
6429 | for(int i=0; i<3; i++){
|
---|
6430 | if(step[i]==0.0D)step[i]=xmax*0.1D;
|
---|
6431 | }
|
---|
6432 | if(this.scaleFlag)step[3] = 0.1D*start[3];
|
---|
6433 |
|
---|
6434 | // Nelder and Mead Simplex Regression
|
---|
6435 | LogNormalThreeParFunction f = new LogNormalThreeParFunction();
|
---|
6436 | this.addConstraint(0,+1,xmin);
|
---|
6437 | this.addConstraint(1,-1,0.0D);
|
---|
6438 | this.addConstraint(2,-1,0.0D);
|
---|
6439 |
|
---|
6440 | f.scaleOption = this.scaleFlag;
|
---|
6441 | f.scaleFactor = this.yScaleFactor;
|
---|
6442 | Object regFun2 = (Object)f;
|
---|
6443 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
6444 |
|
---|
6445 | if(plotFlag==1){
|
---|
6446 | // Print results
|
---|
6447 | if(!this.supressPrint)this.print();
|
---|
6448 |
|
---|
6449 | // Plot results
|
---|
6450 | int flag = this.plotXY(f);
|
---|
6451 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
6452 | }
|
---|
6453 |
|
---|
6454 | if(yFlag){
|
---|
6455 | // restore data
|
---|
6456 | for(int i=0; i<this.nData-1; i++){
|
---|
6457 | this.yData[i]=-this.yData[i];
|
---|
6458 | }
|
---|
6459 | }
|
---|
6460 | }
|
---|
6461 |
|
---|
6462 |
|
---|
6463 | // FIT TO A LORENTZIAN DISTRIBUTION
|
---|
6464 |
|
---|
6465 | // Fit data to a lorentzian
|
---|
6466 | public void lorentzian(){
|
---|
6467 | this.fitLorentzian(0);
|
---|
6468 | }
|
---|
6469 |
|
---|
6470 | public void lorentzianPlot(){
|
---|
6471 | this.fitLorentzian(1);
|
---|
6472 | }
|
---|
6473 |
|
---|
6474 | protected void fitLorentzian(int allTest){
|
---|
6475 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
6476 | this.lastMethod=5;
|
---|
6477 | this.userSupplied = false;
|
---|
6478 | this.linNonLin = false;
|
---|
6479 | this.zeroCheck = false;
|
---|
6480 | this.nTerms=3;
|
---|
6481 | if(!this.scaleFlag)this.nTerms=2;
|
---|
6482 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
6483 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
6484 |
|
---|
6485 | // order data into ascending order of the abscissae
|
---|
6486 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
6487 |
|
---|
6488 | // check sign of y data
|
---|
6489 | Double tempd=null;
|
---|
6490 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
6491 | tempd = (Double)retY.get(4);
|
---|
6492 | double yPeak = tempd.doubleValue();
|
---|
6493 | boolean yFlag = false;
|
---|
6494 | if(yPeak<0.0D){
|
---|
6495 | System.out.println("Regression.fitLorentzian(): This implementation of the Lorentzian distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
6496 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
6497 | for(int i =0; i<this.nData; i++){
|
---|
6498 | yData[i] = -yData[i];
|
---|
6499 | }
|
---|
6500 | retY = Regression.dataSign(yData);
|
---|
6501 | yFlag=true;
|
---|
6502 | }
|
---|
6503 |
|
---|
6504 | // Calculate x value at peak y (estimate of the distribution mode)
|
---|
6505 | ArrayList ret1 = Regression.dataSign(yData);
|
---|
6506 | Integer tempi = null;
|
---|
6507 | tempi = (Integer)ret1.get(5);
|
---|
6508 | int peaki = tempi.intValue();
|
---|
6509 | double mean = xData[0][peaki];
|
---|
6510 |
|
---|
6511 | // Calculate an estimate of the half-height width
|
---|
6512 | double sd = halfWidth(xData[0], yData);
|
---|
6513 |
|
---|
6514 | // Calculate estimate of y scale
|
---|
6515 | tempd = (Double)ret1.get(4);
|
---|
6516 | double ym = tempd.doubleValue();
|
---|
6517 | ym=ym*sd*Math.PI/2.0D;
|
---|
6518 |
|
---|
6519 | // Fill arrays needed by the Simplex
|
---|
6520 | double[] start = new double[this.nTerms];
|
---|
6521 | double[] step = new double[this.nTerms];
|
---|
6522 | start[0] = mean;
|
---|
6523 | start[1] = sd*0.9D;
|
---|
6524 | if(this.scaleFlag){
|
---|
6525 | start[2] = ym;
|
---|
6526 | }
|
---|
6527 | step[0] = 0.2D*sd;
|
---|
6528 | if(step[0]==0.0D){
|
---|
6529 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
6530 | Double tempdd = null;
|
---|
6531 | tempdd = (Double)ret0.get(2);
|
---|
6532 | double xmax = tempdd.doubleValue();
|
---|
6533 | if(xmax==0.0D){
|
---|
6534 | tempdd = (Double)ret0.get(0);
|
---|
6535 | xmax = tempdd.doubleValue();
|
---|
6536 | }
|
---|
6537 | step[0]=xmax*0.1D;
|
---|
6538 | }
|
---|
6539 | step[1] = 0.2D*start[1];
|
---|
6540 | if(this.scaleFlag)step[2] = 0.2D*start[2];
|
---|
6541 |
|
---|
6542 | // Nelder and Mead Simplex Regression
|
---|
6543 | LorentzianFunction f = new LorentzianFunction();
|
---|
6544 | this.addConstraint(1,-1,0.0D);
|
---|
6545 | f.scaleOption = this.scaleFlag;
|
---|
6546 | f.scaleFactor = this.yScaleFactor;
|
---|
6547 | Object regFun2 = (Object)f;
|
---|
6548 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
6549 |
|
---|
6550 | if(allTest==1){
|
---|
6551 | // Print results
|
---|
6552 | if(!this.supressPrint)this.print();
|
---|
6553 |
|
---|
6554 | // Plot results
|
---|
6555 | int flag = this.plotXY(f);
|
---|
6556 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
6557 | }
|
---|
6558 |
|
---|
6559 | if(yFlag){
|
---|
6560 | // restore data
|
---|
6561 | for(int i=0; i<this.nData-1; i++){
|
---|
6562 | this.yData[i]=-this.yData[i];
|
---|
6563 | }
|
---|
6564 | }
|
---|
6565 |
|
---|
6566 | }
|
---|
6567 |
|
---|
6568 |
|
---|
6569 | // Static method allowing fitting of a data array to one or several of the above distributions
|
---|
6570 | public static void fitOneOrSeveralDistributions(double[] array){
|
---|
6571 |
|
---|
6572 | int numberOfPoints = array.length; // number of points
|
---|
6573 | double maxValue = Fmath.maximum(array); // maximum value of distribution
|
---|
6574 | double minValue = Fmath.minimum(array); // minimum value of distribution
|
---|
6575 | double span = maxValue - minValue; // span of distribution
|
---|
6576 |
|
---|
6577 | // Calculation of number of bins and bin width
|
---|
6578 | int numberOfBins = (int)Math.ceil(Math.sqrt(numberOfPoints));
|
---|
6579 | double binWidth = span/numberOfBins;
|
---|
6580 | double averagePointsPerBin = (double)numberOfPoints/(double)numberOfBins;
|
---|
6581 |
|
---|
6582 | // Option for altering bin width
|
---|
6583 | String comment = "Maximum value: " + maxValue + "\n";
|
---|
6584 | comment += "Minimum value: " + minValue + "\n";
|
---|
6585 | comment += "Suggested bin width: " + binWidth + "\n";
|
---|
6586 | comment += "Giving an average points per bin: " + averagePointsPerBin + "\n";
|
---|
6587 | comment += "If you wish to change the bin width enter the new value below \n";
|
---|
6588 | comment += "and click on OK\n";
|
---|
6589 | comment += "If you do NOT wish to change the bin width simply click on OK";
|
---|
6590 | binWidth = Db.readDouble(comment, binWidth);
|
---|
6591 |
|
---|
6592 | // Create output file
|
---|
6593 | comment = "Input the name of the output text file\n";
|
---|
6594 | comment += "[Do not forget the extension, e.g. .txt]";
|
---|
6595 | String outputTitle = Db.readLine(comment, "fitOneOrSeveralDistributionsOutput.txt");
|
---|
6596 | FileOutput fout = new FileOutput(outputTitle, 'n');
|
---|
6597 | fout.println("Fitting a set of data to one or more distributions");
|
---|
6598 | fout.println("Class Regression/Stat: method fitAllDistributions");
|
---|
6599 | fout.dateAndTimeln();
|
---|
6600 | fout.println();
|
---|
6601 | fout.printtab("Number of points: ");
|
---|
6602 | fout.println(numberOfPoints);
|
---|
6603 | fout.printtab("Minimum value: ");
|
---|
6604 | fout.println(minValue);
|
---|
6605 | fout.printtab("Maximum value: ");
|
---|
6606 | fout.println(maxValue);
|
---|
6607 | fout.printtab("Number of bins: ");
|
---|
6608 | fout.println(numberOfBins);
|
---|
6609 | fout.printtab("Bin width: ");
|
---|
6610 | fout.println(binWidth);
|
---|
6611 | fout.printtab("Average number of points per bin: ");
|
---|
6612 | fout.println(averagePointsPerBin);
|
---|
6613 | fout.println();
|
---|
6614 |
|
---|
6615 | // Choose distributions and perform regression
|
---|
6616 | String[] comments = {"Gaussian Distribution", "Two parameter Log-normal Distribution", "Three parameter Log-normal Distribution", "Logistic Distribution", "Lorentzian Distribution", "Type 1 Extreme Distribution - Gumbel minimum order statistic", "Type 1 Extreme Distribution - Gumbel maximum order statistic", "Type 2 Extreme Distribution - Frechet", "Type 3 Extreme Distribution - Weibull", "Type 3 Extreme Distribution - Exponential Distribution", "Type 3 Extreme Distribution - Rayleigh Distribution", "Pareto Distribution", "Beta Distribution", "Gamma Distribution", "Erlang Distribution", "exit"};
|
---|
6617 | String[] boxTitles = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "exit"};
|
---|
6618 | String headerComment = "Choose next distribution to be fitted by clicking on box number";
|
---|
6619 | int defaultBox = 1;
|
---|
6620 | boolean testDistType = true;
|
---|
6621 | Regression reg = null;
|
---|
6622 | double[] coeff = null;
|
---|
6623 | while(testDistType){
|
---|
6624 | int opt = Db.optionBox(headerComment, comments, boxTitles, defaultBox);
|
---|
6625 | switch(opt){
|
---|
6626 | case 1: // Gaussian
|
---|
6627 | reg = new Regression(array, binWidth);
|
---|
6628 | reg.supressPrint();
|
---|
6629 | reg.gaussianPlot();
|
---|
6630 | coeff = reg.getCoeff();
|
---|
6631 | fout.println("NORMAL (GAUSSIAN) DISTRIBUTION");
|
---|
6632 | fout.println("Best Estimates:");
|
---|
6633 | fout.printtab("Mean [mu] ");
|
---|
6634 | fout.println(coeff[0]);
|
---|
6635 | fout.printtab("Standard deviation [sigma] ");
|
---|
6636 | fout.println(coeff[1]);
|
---|
6637 | fout.printtab("Scaling factor [Ao] ");
|
---|
6638 | fout.println(coeff[2]);
|
---|
6639 | Regression.regressionDetails(fout, reg);
|
---|
6640 | break;
|
---|
6641 | case 2: // Two parameter Log-normal
|
---|
6642 | reg = new Regression(array, binWidth);
|
---|
6643 | reg.supressPrint();
|
---|
6644 | reg.logNormalTwoParPlot();
|
---|
6645 | coeff = reg.getCoeff();
|
---|
6646 | fout.println("LOG-NORMAL DISTRIBUTION (two parameter statistic)");
|
---|
6647 | fout.println("Best Estimates:");
|
---|
6648 | fout.printtab("Location parameter [mu] ");
|
---|
6649 | fout.println(coeff[0]);
|
---|
6650 | fout.printtab("Shape parameter [sigma] ");
|
---|
6651 | fout.println(coeff[1]);
|
---|
6652 | fout.printtab("Scaling factor [Ao] ");
|
---|
6653 | fout.println(coeff[2]);
|
---|
6654 | Regression.regressionDetails(fout, reg);
|
---|
6655 | break;
|
---|
6656 | case 3: // Three parameter Log-normal
|
---|
6657 | reg = new Regression(array, binWidth);
|
---|
6658 | reg.supressPrint();
|
---|
6659 | reg.logNormalThreeParPlot();
|
---|
6660 | coeff = reg.getCoeff();
|
---|
6661 | fout.println("LOG-NORMAL DISTRIBUTION (three parameter statistic)");
|
---|
6662 | fout.println("Best Estimates:");
|
---|
6663 | fout.printtab("Location parameter [alpha] ");
|
---|
6664 | fout.println(coeff[0]);
|
---|
6665 | fout.printtab("Shape parameter [beta] ");
|
---|
6666 | fout.println(coeff[1]);
|
---|
6667 | fout.printtab("Scale parameter [gamma] ");
|
---|
6668 | fout.println(coeff[2]);
|
---|
6669 | fout.printtab("Scaling factor [Ao] ");
|
---|
6670 | fout.println(coeff[3]);
|
---|
6671 | Regression.regressionDetails(fout, reg);
|
---|
6672 | break;
|
---|
6673 | case 4: // Logistic
|
---|
6674 | reg = new Regression(array, binWidth);
|
---|
6675 | reg.supressPrint();
|
---|
6676 | reg.logisticPlot();
|
---|
6677 | coeff = reg.getCoeff();
|
---|
6678 | fout.println("LOGISTIC DISTRIBUTION");
|
---|
6679 | fout.println("Best Estimates:");
|
---|
6680 | fout.printtab("Location parameter [mu] ");
|
---|
6681 | fout.println(coeff[0]);
|
---|
6682 | fout.printtab("Scale parameter [beta] ");
|
---|
6683 | fout.println(coeff[1]);
|
---|
6684 | fout.printtab("Scaling factor [Ao] ");
|
---|
6685 | fout.println(coeff[2]);
|
---|
6686 | Regression.regressionDetails(fout, reg);
|
---|
6687 | break;
|
---|
6688 | case 5: // Lorentzian
|
---|
6689 | reg = new Regression(array, binWidth);
|
---|
6690 | reg.supressPrint();
|
---|
6691 | reg.lorentzianPlot();
|
---|
6692 | coeff = reg.getCoeff();
|
---|
6693 | fout.println("LORENTZIAN DISTRIBUTION");
|
---|
6694 | fout.println("Best Estimates:");
|
---|
6695 | fout.printtab("Mean [mu] ");
|
---|
6696 | fout.println(coeff[0]);
|
---|
6697 | fout.printtab("Half-height parameter [Gamma] ");
|
---|
6698 | fout.println(coeff[1]);
|
---|
6699 | fout.printtab("Scaling factor [Ao] ");
|
---|
6700 | fout.println(coeff[2]);
|
---|
6701 | Regression.regressionDetails(fout, reg);
|
---|
6702 | break;
|
---|
6703 | case 6: // Gumbel [minimum]
|
---|
6704 | reg = new Regression(array, binWidth);
|
---|
6705 | reg.supressPrint();
|
---|
6706 | reg.gumbelMinPlot();
|
---|
6707 | coeff = reg.getCoeff();
|
---|
6708 | fout.println("TYPE 1 (GUMBEL) EXTREME DISTRIBUTION [MINIMUM ORDER STATISTIC]");
|
---|
6709 | fout.println("Best Estimates:");
|
---|
6710 | fout.printtab("Location parameter [mu] ");
|
---|
6711 | fout.println(coeff[0]);
|
---|
6712 | fout.printtab("Scale parameter [sigma] ");
|
---|
6713 | fout.println(coeff[1]);
|
---|
6714 | fout.printtab("Scaling factor [Ao] ");
|
---|
6715 | fout.println(coeff[2]);
|
---|
6716 | Regression.regressionDetails(fout, reg);
|
---|
6717 | break;
|
---|
6718 | case 7: // Gumbel [maximum]
|
---|
6719 | reg = new Regression(array, binWidth);
|
---|
6720 | reg.supressPrint();
|
---|
6721 | reg.gumbelMaxPlot();
|
---|
6722 | coeff = reg.getCoeff();
|
---|
6723 | fout.println("TYPE 1 (GUMBEL) EXTREME DISTRIBUTION [MAXIMUM ORDER STATISTIC]");
|
---|
6724 | fout.println("Best Estimates:");
|
---|
6725 | fout.printtab("Location parameter [mu] ");
|
---|
6726 | fout.println(coeff[0]);
|
---|
6727 | fout.printtab("Scale parameter [sigma] ");
|
---|
6728 | fout.println(coeff[1]);
|
---|
6729 | fout.printtab("Scaling factor [Ao] ");
|
---|
6730 | fout.println(coeff[2]);
|
---|
6731 | Regression.regressionDetails(fout, reg);
|
---|
6732 | break;
|
---|
6733 | case 8: // Frechet
|
---|
6734 | reg = new Regression(array, binWidth);
|
---|
6735 | reg.supressPrint();
|
---|
6736 | reg.frechetPlot();
|
---|
6737 | coeff = reg.getCoeff();
|
---|
6738 | fout.println("TYPE 2 (FRECHET) EXTREME DISTRIBUTION");
|
---|
6739 | fout.println("Best Estimates:");
|
---|
6740 | fout.printtab("Location parameter [mu] ");
|
---|
6741 | fout.println(coeff[0]);
|
---|
6742 | fout.printtab("Scale parameter [sigma] ");
|
---|
6743 | fout.println(coeff[1]);
|
---|
6744 | fout.printtab("Shape parameter [gamma] ");
|
---|
6745 | fout.println(coeff[2]);
|
---|
6746 | fout.printtab("Scaling factor [Ao] ");
|
---|
6747 | fout.println(coeff[3]);
|
---|
6748 | Regression.regressionDetails(fout, reg);
|
---|
6749 | break;
|
---|
6750 | case 9: // Weibull
|
---|
6751 | reg = new Regression(array, binWidth);
|
---|
6752 | reg.supressPrint();
|
---|
6753 | reg.weibullPlot();
|
---|
6754 | coeff = reg.getCoeff();
|
---|
6755 | fout.println("TYPE 3 (WEIBULL) EXTREME DISTRIBUTION");
|
---|
6756 | fout.println("Best Estimates:");
|
---|
6757 | fout.printtab("Location parameter [mu] ");
|
---|
6758 | fout.println(coeff[0]);
|
---|
6759 | fout.printtab("Scale parameter [sigma] ");
|
---|
6760 | fout.println(coeff[1]);
|
---|
6761 | fout.printtab("Shape parameter [gamma] ");
|
---|
6762 | fout.println(coeff[2]);
|
---|
6763 | fout.printtab("Scaling factor [Ao] ");
|
---|
6764 | fout.println(coeff[3]);
|
---|
6765 | Regression.regressionDetails(fout, reg);
|
---|
6766 | break;
|
---|
6767 | case 10: // Exponential
|
---|
6768 | reg = new Regression(array, binWidth);
|
---|
6769 | reg.supressPrint();
|
---|
6770 | reg.exponentialPlot();
|
---|
6771 | coeff = reg.getCoeff();
|
---|
6772 | fout.println("EXPONENTIAL DISTRIBUTION");
|
---|
6773 | fout.println("Best Estimates:");
|
---|
6774 | fout.printtab("Location parameter [mu] ");
|
---|
6775 | fout.println(coeff[0]);
|
---|
6776 | fout.printtab("Scale parameter [sigma] ");
|
---|
6777 | fout.println(coeff[1]);
|
---|
6778 | fout.printtab("Scaling factor [Ao] ");
|
---|
6779 | fout.println(coeff[2]);
|
---|
6780 | Regression.regressionDetails(fout, reg);
|
---|
6781 | break;
|
---|
6782 | case 11: // Rayleigh
|
---|
6783 | reg = new Regression(array, binWidth);
|
---|
6784 | reg.supressPrint();
|
---|
6785 | reg.rayleighPlot();
|
---|
6786 | coeff = reg.getCoeff();
|
---|
6787 | fout.println("RAYLEIGH DISTRIBUTION");
|
---|
6788 | fout.println("Best Estimates:");
|
---|
6789 | fout.printtab("Scale parameter [beta] ");
|
---|
6790 | fout.println(coeff[0]);
|
---|
6791 | fout.printtab("Scaling factor [Ao] ");
|
---|
6792 | fout.println(coeff[1]);
|
---|
6793 | Regression.regressionDetails(fout, reg);
|
---|
6794 | break;
|
---|
6795 | case 12: // Pareto
|
---|
6796 | reg = new Regression(array, binWidth);
|
---|
6797 | reg.supressPrint();
|
---|
6798 | reg.paretoThreeParPlot();
|
---|
6799 | coeff = reg.getCoeff();
|
---|
6800 | fout.println("PARETO DISTRIBUTION");
|
---|
6801 | fout.println("Best Estimates:");
|
---|
6802 | fout.printtab("Shape parameter [alpha] ");
|
---|
6803 | fout.println(coeff[0]);
|
---|
6804 | fout.printtab("Scale parameter [beta] ");
|
---|
6805 | fout.println(coeff[1]);
|
---|
6806 | fout.printtab("Threshold parameter [theta] ");
|
---|
6807 | fout.println(coeff[2]);
|
---|
6808 | fout.printtab("Scaling factor [Ao] ");
|
---|
6809 | fout.println(coeff[3]);
|
---|
6810 | Regression.regressionDetails(fout, reg);
|
---|
6811 | break;
|
---|
6812 | case 13: // Beta
|
---|
6813 | reg = new Regression(array, binWidth);
|
---|
6814 | reg.supressPrint();
|
---|
6815 | reg.betaMinMaxPlot();
|
---|
6816 | coeff = reg.getCoeff();
|
---|
6817 | fout.println("BETA DISTRIBUTION");
|
---|
6818 | fout.println("Best Estimates:");
|
---|
6819 | fout.printtab("Shape parameter [alpha] ");
|
---|
6820 | fout.println(coeff[0]);
|
---|
6821 | fout.printtab("Shape parameter [beta] ");
|
---|
6822 | fout.println(coeff[1]);
|
---|
6823 | fout.printtab("minimum limit [min] ");
|
---|
6824 | fout.println(coeff[2]);
|
---|
6825 | fout.printtab("maximum limit [max] ");
|
---|
6826 | fout.println(coeff[3]);
|
---|
6827 | fout.printtab("Scaling factor [Ao] ");
|
---|
6828 | fout.println(coeff[4]);
|
---|
6829 | Regression.regressionDetails(fout, reg);
|
---|
6830 | break;
|
---|
6831 | case 14: // Gamma
|
---|
6832 | reg = new Regression(array, binWidth);
|
---|
6833 | reg.supressPrint();
|
---|
6834 | reg.gammaPlot();
|
---|
6835 | coeff = reg.getCoeff();
|
---|
6836 | fout.println("GAMMA DISTRIBUTION");
|
---|
6837 | fout.println("Best Estimates:");
|
---|
6838 | fout.printtab("Location parameter [mu] ");
|
---|
6839 | fout.println(coeff[0]);
|
---|
6840 | fout.printtab("Scale parameter [beta] ");
|
---|
6841 | fout.println(coeff[1]);
|
---|
6842 | fout.printtab("Shape parameter [gamma] ");
|
---|
6843 | fout.println(coeff[2]);
|
---|
6844 | fout.printtab("Scaling factor [Ao] ");
|
---|
6845 | fout.println(coeff[3]);
|
---|
6846 | Regression.regressionDetails(fout, reg);
|
---|
6847 | break;
|
---|
6848 | case 15: // Erlang
|
---|
6849 | reg = new Regression(array, binWidth);
|
---|
6850 | reg.supressPrint();
|
---|
6851 | reg.erlangPlot();
|
---|
6852 | coeff = reg.getCoeff();
|
---|
6853 | fout.println("ERLANG DISTRIBUTION");
|
---|
6854 | fout.println("Best Estimates:");
|
---|
6855 | fout.printtab("Shape parameter [lambda] ");
|
---|
6856 | fout.println(coeff[0]);
|
---|
6857 | fout.printtab("Rate parameter [k] ");
|
---|
6858 | fout.println(reg.getKayValue());
|
---|
6859 | fout.printtab("Scaling factor [Ao] ");
|
---|
6860 | fout.println(coeff[1]);
|
---|
6861 | Regression.regressionDetails(fout, reg);
|
---|
6862 | break;
|
---|
6863 | case 16: // exit
|
---|
6864 | default: fout.close();
|
---|
6865 | testDistType = false;
|
---|
6866 | }
|
---|
6867 | }
|
---|
6868 | }
|
---|
6869 |
|
---|
6870 | // Output method for fitOneOrSeveralDistributions
|
---|
// Output method for fitOneOrSeveralDistributions
// Appends the goodness-of-fit statistics of the completed regression reg
// (chi square, reduced chi square, sum of squares, degrees of freedom and
// iteration counts) to the already-open output file fout.
// fout: open output file; reg: a Regression on which a fit has been performed.
protected static void regressionDetails(FileOutput fout, Regression reg){
    fout.println();
    fout.println("Regression details:");
    fout.printtab("Chi squared: ");
    fout.println(reg.getChiSquare());
    fout.printtab("Reduced chi squared: ");
    fout.println(reg.getReducedChiSquare());
    fout.printtab("Sum of squares: ");
    fout.println(reg.getSumOfSquares());
    fout.printtab("Degrees of freedom: ");
    fout.println(reg.getDegFree());
    fout.printtab("Number of iterations: ");
    fout.println(reg.getNiter());
    fout.printtab("maximum number of iterations allowed: ");
    fout.println(reg.getNmax());
    fout.println();
    fout.println();
}
|
---|
6889 |
|
---|
6890 | // Get the x-y Correlation Coefficient
|
---|
6891 | public double getXYcorrCoeff(){
|
---|
6892 | return this.xyR;
|
---|
6893 | }
|
---|
6894 |
|
---|
6895 | // Get the y-y Correlation Coefficient
|
---|
6896 | public double getYYcorrCoeff(){
|
---|
6897 | return this.yyR;
|
---|
6898 | }
|
---|
6899 |
|
---|
6900 | // check data arrays for sign, maximum, minimum and peak
|
---|
6901 | protected static ArrayList<Object> dataSign(double[] data){
|
---|
6902 |
|
---|
6903 | ArrayList<Object> ret = new ArrayList<Object>();
|
---|
6904 | int n = data.length;
|
---|
6905 |
|
---|
6906 | double max=data[0]; // maximum
|
---|
6907 | int maxi=0; // index of above
|
---|
6908 | double min=data[0]; // minimum
|
---|
6909 | int mini=0; // index of above
|
---|
6910 | double peak=0.0D; // peak: larger of maximum and any abs(negative minimum)
|
---|
6911 | int peaki=-1; // index of above
|
---|
6912 | int signFlag=-1; // 0 all positive; 1 all negative; 2 positive and negative
|
---|
6913 | double shift=0.0D; // shift to make all positive if a mixture of positive and negative
|
---|
6914 | double mean = 0.0D; // mean value
|
---|
6915 | int signCheckZero=0; // number of zero values
|
---|
6916 | int signCheckNeg=0; // number of positive values
|
---|
6917 | int signCheckPos=0; // number of negative values
|
---|
6918 |
|
---|
6919 | for(int i=0; i<n; i++){
|
---|
6920 | mean =+ data[i];
|
---|
6921 | if(data[i]>max){
|
---|
6922 | max=data[i];
|
---|
6923 | maxi=i;
|
---|
6924 | }
|
---|
6925 | if(data[i]<min){
|
---|
6926 | min=data[i];
|
---|
6927 | mini=i;
|
---|
6928 | }
|
---|
6929 | if(data[i]==0.0D)signCheckZero++;
|
---|
6930 | if(data[i]>0.0D)signCheckPos++;
|
---|
6931 | if(data[i]<0.0D)signCheckNeg++;
|
---|
6932 | }
|
---|
6933 | mean /= (double)n;
|
---|
6934 |
|
---|
6935 | if((signCheckZero+signCheckPos)==n){
|
---|
6936 | peak=max;
|
---|
6937 | peaki=maxi;
|
---|
6938 | signFlag=0;
|
---|
6939 | }
|
---|
6940 | else{
|
---|
6941 | if((signCheckZero+signCheckNeg)==n){
|
---|
6942 | peak=min;
|
---|
6943 | peaki=mini;
|
---|
6944 | signFlag=1;
|
---|
6945 | }
|
---|
6946 | else{
|
---|
6947 | peak=max;
|
---|
6948 | peaki=maxi;
|
---|
6949 | if(-min>max){
|
---|
6950 | peak=min;
|
---|
6951 | peak=mini;
|
---|
6952 | }
|
---|
6953 | signFlag=2;
|
---|
6954 | shift=-min;
|
---|
6955 | }
|
---|
6956 | }
|
---|
6957 |
|
---|
6958 | // transfer results to the ArrayList
|
---|
6959 | ret.add(new Double(min));
|
---|
6960 | ret.add(new Integer(mini));
|
---|
6961 | ret.add(new Double(max));
|
---|
6962 | ret.add(new Integer(maxi));
|
---|
6963 | ret.add(new Double(peak));
|
---|
6964 | ret.add(new Integer(peaki));
|
---|
6965 | ret.add(new Integer(signFlag));
|
---|
6966 | ret.add(new Double(shift));
|
---|
6967 | ret.add(new Double(mean));
|
---|
6968 | ret.add(new Integer(signCheckZero));
|
---|
6969 | ret.add(new Integer(signCheckPos));
|
---|
6970 | ret.add(new Integer(signCheckNeg));
|
---|
6971 |
|
---|
6972 |
|
---|
6973 | return ret;
|
---|
6974 | }
|
---|
6975 |
|
---|
// Fit the data to a three-parameter Frechet distribution (mu, sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void frechet(){
    this.fitFrechet(0, 0);
}
|
---|
6979 |
|
---|
// Fit the data to a three-parameter Frechet distribution (mu, sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void frechetPlot(){
    this.fitFrechet(1, 0);
}
|
---|
6983 |
|
---|
// Fit the data to a two-parameter Frechet distribution (sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void frechetTwoPar(){
    this.fitFrechet(0, 1);
}
|
---|
6987 |
|
---|
// Fit the data to a two-parameter Frechet distribution (sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void frechetTwoParPlot(){
    this.fitFrechet(1, 1);
}
|
---|
6991 |
|
---|
// Fit the data to a standard Frechet distribution (gamma only,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void frechetStandard(){
    this.fitFrechet(0, 2);
}
|
---|
6995 |
|
---|
// Fit the data to a standard Frechet distribution (gamma only,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void frechetStandardPlot(){
    this.fitFrechet(1, 2);
}
|
---|
6999 |
|
---|
7000 | protected void fitFrechet(int allTest, int typeFlag){
|
---|
7001 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
7002 | this.userSupplied = false;
|
---|
7003 | switch(typeFlag){
|
---|
7004 | case 0: this.lastMethod=13;
|
---|
7005 | this.nTerms=4;
|
---|
7006 | break;
|
---|
7007 | case 1: this.lastMethod=14;
|
---|
7008 | this.nTerms=3;
|
---|
7009 | break;
|
---|
7010 | case 2: this.lastMethod=15;
|
---|
7011 | this.nTerms=2;
|
---|
7012 | break;
|
---|
7013 | }
|
---|
7014 | if(!this.scaleFlag)this.nTerms=this.nTerms-1;
|
---|
7015 | this.frechetWeibull=true;
|
---|
7016 | this.fitFrechetWeibull(allTest, typeFlag);
|
---|
7017 | }
|
---|
7018 |
|
---|
7019 | // method for fitting data to either a Frechet or a Weibull distribution
|
---|
7020 | protected void fitFrechetWeibull(int allTest, int typeFlag){
|
---|
7021 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
7022 | this.linNonLin = false;
|
---|
7023 | this.zeroCheck = false;
|
---|
7024 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
7025 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
7026 |
|
---|
7027 | // order data into ascending order of the abscissae
|
---|
7028 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
7029 |
|
---|
7030 | // check y data
|
---|
7031 | Double tempd=null;
|
---|
7032 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
7033 | tempd = (Double)retY.get(4);
|
---|
7034 | double yPeak = tempd.doubleValue();
|
---|
7035 | Integer tempi = null;
|
---|
7036 | tempi = (Integer)retY.get(5);
|
---|
7037 | int peaki = tempi.intValue();
|
---|
7038 | tempd = (Double)retY.get(8);
|
---|
7039 | double mean = tempd.doubleValue();
|
---|
7040 |
|
---|
7041 | // check for infinity
|
---|
7042 | boolean testInf = true;
|
---|
7043 | double dof = this.degreesOfFreedom;
|
---|
7044 | while(testInf){
|
---|
7045 | if(this.infinityCheck(yPeak, peaki)){
|
---|
7046 | dof--;
|
---|
7047 | if(dof<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("The effective degrees of freedom have been reduced to zero");
|
---|
7048 | retY = Regression.dataSign(yData);
|
---|
7049 | tempd = (Double)retY.get(4);
|
---|
7050 | yPeak = tempd.doubleValue();
|
---|
7051 | tempi = (Integer)retY.get(5);
|
---|
7052 | peaki = tempi.intValue();
|
---|
7053 | tempd = (Double)retY.get(8);
|
---|
7054 | mean = tempd.doubleValue();
|
---|
7055 | }
|
---|
7056 | else{
|
---|
7057 | testInf = false;
|
---|
7058 | }
|
---|
7059 | }
|
---|
7060 |
|
---|
7061 | // check sign of y data
|
---|
7062 | String ss = "Weibull";
|
---|
7063 | if(this.frechetWeibull)ss = "Frechet";
|
---|
7064 | boolean ySignFlag = false;
|
---|
7065 | if(yPeak<0.0D){
|
---|
7066 | this.reverseYsign(ss);
|
---|
7067 | retY = Regression.dataSign(this.yData);
|
---|
7068 | yPeak = -yPeak;
|
---|
7069 | ySignFlag = true;
|
---|
7070 | }
|
---|
7071 |
|
---|
7072 | // check y values for all very small values
|
---|
7073 | boolean magCheck=false;
|
---|
7074 | double magScale = this.checkYallSmall(yPeak, ss);
|
---|
7075 | if(magScale!=1.0D){
|
---|
7076 | magCheck=true;
|
---|
7077 | yPeak=1.0D;
|
---|
7078 | }
|
---|
7079 |
|
---|
7080 | // minimum value of x
|
---|
7081 | ArrayList<Object> retX = Regression.dataSign(this.xData[0]);
|
---|
7082 | tempd = (Double)retX.get(0);
|
---|
7083 | double xMin = tempd.doubleValue();
|
---|
7084 |
|
---|
7085 | // maximum value of x
|
---|
7086 | tempd = (Double)retX.get(2);
|
---|
7087 | double xMax = tempd.doubleValue();
|
---|
7088 |
|
---|
7089 | // Calculate x value at peak y (estimate of the 'distribution mode')
|
---|
7090 | double distribMode = xData[0][peaki];
|
---|
7091 |
|
---|
7092 | // Calculate an estimate of the half-height width
|
---|
7093 | double sd = Math.log(2.0D)*halfWidth(xData[0], yData);
|
---|
7094 |
|
---|
7095 | // Save x-y-w data
|
---|
7096 | double[] xx = new double[this.nData];
|
---|
7097 | double[] yy = new double[this.nData];
|
---|
7098 | double[] ww = new double[this.nData];
|
---|
7099 |
|
---|
7100 | for(int i=0; i<this.nData; i++){
|
---|
7101 | xx[i]=this.xData[0][i];
|
---|
7102 | yy[i]=this.yData[i];
|
---|
7103 | ww[i]=this.weight[i];
|
---|
7104 | }
|
---|
7105 |
|
---|
7106 | // Calculate the cumulative probability and return ordinate scaling factor estimate
|
---|
7107 | double[] cumX = new double[this.nData];
|
---|
7108 | double[] cumY = new double[this.nData];
|
---|
7109 | double[] cumW = new double[this.nData];
|
---|
7110 | ErrorProp[] cumYe = ErrorProp.oneDarray(this.nData);
|
---|
7111 | double yScale = this.calculateCumulativeValues(cumX, cumY, cumW, cumYe, peaki, yPeak, distribMode, ss);
|
---|
7112 |
|
---|
7113 | //Calculate loglog v log transforms
|
---|
7114 | if(this.frechetWeibull){
|
---|
7115 | for(int i=0; i<this.nData; i++){
|
---|
7116 | cumYe[i] = ErrorProp.over(1.0D, cumYe[i]);
|
---|
7117 | cumYe[i] = ErrorProp.log(cumYe[i]);
|
---|
7118 | cumYe[i] = ErrorProp.log(cumYe[i]);
|
---|
7119 | cumY[i] = cumYe[i].getValue();
|
---|
7120 | cumW[i] = cumYe[i].getError();
|
---|
7121 | }
|
---|
7122 | }
|
---|
7123 | else{
|
---|
7124 | for(int i=0; i<this.nData; i++){
|
---|
7125 | cumYe[i] = ErrorProp.minus(1.0D,cumYe[i]);
|
---|
7126 | cumYe[i] = ErrorProp.over(1.0D, cumYe[i]);
|
---|
7127 | cumYe[i] = ErrorProp.log(cumYe[i]);
|
---|
7128 | cumYe[i] = ErrorProp.log(cumYe[i]);
|
---|
7129 | cumY[i] = cumYe[i].getValue();
|
---|
7130 | cumW[i] = cumYe[i].getError();
|
---|
7131 | }
|
---|
7132 | }
|
---|
7133 |
|
---|
7134 | // Fill data arrays with transformed data
|
---|
7135 | for(int i =0; i<this.nData; i++){
|
---|
7136 | xData[0][i] = cumX[i];
|
---|
7137 | yData[i] = cumY[i];
|
---|
7138 | weight[i] = cumW[i];
|
---|
7139 | }
|
---|
7140 | boolean weightOptHold = this.weightOpt;
|
---|
7141 | this.weightOpt=true;
|
---|
7142 |
|
---|
7143 | // Nelder and Mead Simplex Regression for semi-linearised Frechet or Weibull
|
---|
7144 | // disable statistical analysis
|
---|
7145 | boolean statFlagHold = this.statFlag;
|
---|
7146 | this.statFlag=false;
|
---|
7147 |
|
---|
7148 | // Fill arrays needed by the Simplex
|
---|
7149 | double[] start = new double[this.nTerms];
|
---|
7150 | double[] step = new double[this.nTerms];
|
---|
7151 | for(int i=0; i<this.nTerms; i++){
|
---|
7152 | start[i]=1.0D;
|
---|
7153 | step[i]=0.2D;
|
---|
7154 | }
|
---|
7155 | double[] gammamin = null;
|
---|
7156 | double gammat = 0;
|
---|
7157 | switch(typeFlag){
|
---|
7158 | case 0:
|
---|
7159 | start[0] = xMin - Math.abs(0.1D*xMin); //mu
|
---|
7160 | start[1] = sd; //sigma
|
---|
7161 | start[2] = 4.0; // gamma
|
---|
7162 | // step sizes
|
---|
7163 | step[0] = 0.2D*start[0];
|
---|
7164 | if(step[0]==0.0D){
|
---|
7165 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
7166 | Double tempdd = null;
|
---|
7167 | tempdd = (Double)ret0.get(2);
|
---|
7168 | double xmax = tempdd.doubleValue();
|
---|
7169 | if(xmax==0.0D){
|
---|
7170 | tempdd = (Double)ret0.get(0);
|
---|
7171 | xmax = tempdd.doubleValue();
|
---|
7172 | }
|
---|
7173 | step[0]=xmax*0.1D;
|
---|
7174 | }
|
---|
7175 | step[1] = 0.2D*start[1];
|
---|
7176 | step[2] = 0.5D*start[2];
|
---|
7177 | this.addConstraint(0,+1,xMin);
|
---|
7178 | this.addConstraint(1,-1,0.0D);
|
---|
7179 | this.addConstraint(2,-1,0.0D);
|
---|
7180 | break;
|
---|
7181 | case 1: start[0] = sd; //sigma
|
---|
7182 | start[1] = 4.0; // gamma
|
---|
7183 | // step sizes
|
---|
7184 | step[0] = 0.2D*start[0];
|
---|
7185 | step[1] = 0.5D*start[1];
|
---|
7186 | this.addConstraint(0,-1,0.0D);
|
---|
7187 | this.addConstraint(1,-1,0.0D);
|
---|
7188 | break;
|
---|
7189 | case 2: start[0] = 4.0; // gamma
|
---|
7190 | // step size
|
---|
7191 | step[0] = 0.5D*start[0];
|
---|
7192 | this.addConstraint(0,-1,0.0D);
|
---|
7193 | break;
|
---|
7194 | }
|
---|
7195 |
|
---|
7196 | // Create instance of loglog function and perform regression
|
---|
7197 | if(this.frechetWeibull){
|
---|
7198 | FrechetFunctionTwo f = new FrechetFunctionTwo();
|
---|
7199 | f.typeFlag = typeFlag;
|
---|
7200 | Object regFun2 = (Object)f;
|
---|
7201 | System.out.println("pppp " + start[0] + " " + start[1] + " " + start[2]);
|
---|
7202 |
|
---|
7203 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
7204 | }
|
---|
7205 | else{
|
---|
7206 | WeibullFunctionTwo f = new WeibullFunctionTwo();
|
---|
7207 | f.typeFlag = typeFlag;
|
---|
7208 | Object regFun2 = (Object)f;
|
---|
7209 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
7210 | }
|
---|
7211 |
|
---|
7212 | // Get best estimates of loglog regression
|
---|
7213 | double[] ests = Conv.copy(this.best);
|
---|
7214 |
|
---|
7215 | // Nelder and Mead Simplex Regression for Frechet or Weibull
|
---|
7216 | // using best estimates from loglog regression as initial estimates
|
---|
7217 |
|
---|
7218 | // re-enable statistical analysis if statFlag was set to true
|
---|
7219 | this.statFlag = statFlagHold;
|
---|
7220 |
|
---|
7221 | // restore data reversing the loglog transform but maintaining any sign reversals
|
---|
7222 | this.weightOpt=weightOptHold;
|
---|
7223 | for(int i =0; i<this.nData; i++){
|
---|
7224 | xData[0][i] = xx[i];
|
---|
7225 | yData[i] = yy[i];
|
---|
7226 | weight[i] = ww[i];
|
---|
7227 | }
|
---|
7228 |
|
---|
7229 | // Fill arrays needed by the Simplex
|
---|
7230 | switch(typeFlag){
|
---|
7231 | case 0: start[0] = ests[0]; //mu
|
---|
7232 | start[1] = ests[1]; //sigma
|
---|
7233 | start[2] = ests[2]; //gamma
|
---|
7234 | if(this.scaleFlag){
|
---|
7235 | start[3] = 1.0/yScale; //y axis scaling factor
|
---|
7236 | }
|
---|
7237 | step[0] = 0.1D*start[0];
|
---|
7238 | if(step[0]==0.0D){
|
---|
7239 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
7240 | Double tempdd = null;
|
---|
7241 | tempdd = (Double)ret0.get(2);
|
---|
7242 | double xmax = tempdd.doubleValue();
|
---|
7243 | if(xmax==0.0D){
|
---|
7244 | tempdd = (Double)ret0.get(0);
|
---|
7245 | xmax = tempdd.doubleValue();
|
---|
7246 | }
|
---|
7247 | step[0]=xmax*0.1D;
|
---|
7248 | }
|
---|
7249 | step[1] = 0.1D*start[1];
|
---|
7250 | step[2] = 0.1D*start[2];
|
---|
7251 | if(this.scaleFlag){
|
---|
7252 | step[3] = 0.1D*start[3];
|
---|
7253 | }
|
---|
7254 | break;
|
---|
7255 | case 1: start[0] = ests[0]; //sigma
|
---|
7256 | start[1] = ests[1]; //gamma
|
---|
7257 | if(this.scaleFlag){
|
---|
7258 | start[2] = 1.0/yScale; //y axis scaling factor
|
---|
7259 | }
|
---|
7260 | step[0] = 0.1D*start[0];
|
---|
7261 | step[1] = 0.1D*start[1];
|
---|
7262 | if(this.scaleFlag)step[2] = 0.1D*start[2];
|
---|
7263 | break;
|
---|
7264 | case 2: start[0] = ests[0]; //gamma
|
---|
7265 | if(this.scaleFlag){
|
---|
7266 | start[1] = 1.0/yScale; //y axis scaling factor
|
---|
7267 | }
|
---|
7268 | step[0] = 0.1D*start[0];
|
---|
7269 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
7270 | break;
|
---|
7271 | }
|
---|
7272 |
|
---|
7273 | // Create instance of Frechet function and perform regression
|
---|
7274 | if(this.frechetWeibull){
|
---|
7275 | FrechetFunctionOne ff = new FrechetFunctionOne();
|
---|
7276 | ff.typeFlag = typeFlag;
|
---|
7277 | ff.scaleOption = this.scaleFlag;
|
---|
7278 | ff.scaleFactor = this.yScaleFactor;
|
---|
7279 | Object regFun3 = (Object)ff;
|
---|
7280 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
7281 | if(allTest==1){
|
---|
7282 | // Print results
|
---|
7283 | if(!this.supressPrint)this.print();
|
---|
7284 | // Plot results
|
---|
7285 | int flag = this.plotXY(ff);
|
---|
7286 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
7287 | }
|
---|
7288 | }
|
---|
7289 | else{
|
---|
7290 | WeibullFunctionOne ff = new WeibullFunctionOne();
|
---|
7291 | ff.typeFlag = typeFlag;
|
---|
7292 | ff.scaleOption = this.scaleFlag;
|
---|
7293 | ff.scaleFactor = this.yScaleFactor;
|
---|
7294 | Object regFun3 = (Object)ff;
|
---|
7295 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
7296 | if(allTest==1){
|
---|
7297 | // Print results
|
---|
7298 | if(!this.supressPrint)this.print();
|
---|
7299 | // Plot results
|
---|
7300 | int flag = this.plotXY(ff);
|
---|
7301 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
7302 | }
|
---|
7303 | }
|
---|
7304 |
|
---|
7305 | // restore data
|
---|
7306 | this.weightOpt = weightOptHold;
|
---|
7307 | if(magCheck){
|
---|
7308 | for(int i =0; i<this.nData; i++){
|
---|
7309 | this.yData[i] = yy[i]/magScale;
|
---|
7310 | if(this.weightOpt)this.weight[i] = ww[i]/magScale;
|
---|
7311 | }
|
---|
7312 | }
|
---|
7313 | if(ySignFlag){
|
---|
7314 | for(int i =0; i<this.nData; i++){
|
---|
7315 | this.yData[i]=-this.yData[i];
|
---|
7316 | }
|
---|
7317 | }
|
---|
7318 | }
|
---|
7319 |
|
---|
7320 | // Check for y value = infinity
|
---|
7321 | public boolean infinityCheck(double yPeak, int peaki){
|
---|
7322 | boolean flag=false;
|
---|
7323 | if(yPeak == 1.0D/0.0D || yPeak == -1.0D/0.0D){
|
---|
7324 | int ii = peaki+1;
|
---|
7325 | if(peaki==this.nData-1)ii = peaki-1;
|
---|
7326 | this.xData[0][peaki]=this.xData[0][ii];
|
---|
7327 | this.yData[peaki]=this.yData[ii];
|
---|
7328 | this.weight[peaki]=this.weight[ii];
|
---|
7329 | System.out.println("An infinty has been removed at point "+peaki);
|
---|
7330 | flag = true;
|
---|
7331 | }
|
---|
7332 | return flag;
|
---|
7333 | }
|
---|
7334 |
|
---|
7335 | // reverse sign of y values if negative
|
---|
7336 | public void reverseYsign(String ss){
|
---|
7337 | System.out.println("This implementation of the " + ss + " distributions takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
7338 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
7339 | for(int i =0; i<this.nData; i++){
|
---|
7340 | this.yData[i] = -this.yData[i];
|
---|
7341 | }
|
---|
7342 | }
|
---|
7343 |
|
---|
7344 | // check y values for all y are very small value
|
---|
7345 | public double checkYallSmall(double yPeak, String ss){
|
---|
7346 | double magScale = 1.0D;
|
---|
7347 | double recipYpeak = Fmath.truncate(1.0/yPeak, 4);
|
---|
7348 | if(yPeak<1e-4){
|
---|
7349 | System.out.println(ss + " fitting: The ordinate axis (y axis) has been rescaled by "+recipYpeak+" to reduce rounding errors");
|
---|
7350 | for(int i=0; i<this.nData; i++){
|
---|
7351 | this.yData[i]*=recipYpeak;
|
---|
7352 | if(this.weightOpt)this.weight[i]*=recipYpeak;
|
---|
7353 | }
|
---|
7354 | magScale=recipYpeak;
|
---|
7355 | }
|
---|
7356 | return magScale;
|
---|
7357 | }
|
---|
7358 |
|
---|
// Calculate cumulative values for distributions with a single independent variable
// Entered parameters
// peaki - index of the y value peak
// yPeak - y value of the y peak
// distribMode - x value at peak y (estimate of the 'distribution mode')
// ss - name of the distribution to be fitted, e.g. "Frechet"
// Returns:
// return statement - an estimate of the scaling factor
// cumX - x data as a one dimensional array with zero values replaced by average of adjacent values
// cumY - cumulative y values
// cumW - cumulative y weight values
// cumYe - cumulative Y values as ErrorProp
public double calculateCumulativeValues(double[] cumX, double[] cumY, double[] cumW, ErrorProp[] cumYe, int peaki, double yPeak, double distribMode, String ss){

    // Put independent values into a one-dimensional array
    cumX[0]= this.xData[0][0];
    for(int i=1; i<this.nData; i++){
        cumX[i] = this.xData[0][i];
    }

    // Create an array of ErrorProps from the independent values and their weights
    // (value = y datum, error = its weight, so errors propagate through the sums)
    ErrorProp[] yE = ErrorProp.oneDarray(this.nData);
    for(int i=0; i<this.nData; i++){
        yE[i].reset(this.yData[i], this.weight[i]);
    }

    // check on shape of data for first step of cumulative calculation:
    // if the first point lies on a clearly rising edge, linearly extrapolate
    // back to an effective zero-crossing x0 and start the integral there
    if(peaki!=0){
        if(peaki==this.nData-1){
            System.out.println("The data does not cover a wide enough range of x values to fit to a " + ss + " distribution with any accuracy");
            System.out.println("The regression will be attempted but you should treat any result with great caution");
        }
        if(this.yData[0]<this.yData[1]*0.5D && this.yData[0]>distribMode*0.02D){
            // extrapolate the leading edge to estimate where y would reach zero
            ErrorProp x0 = new ErrorProp(0.0D, 0.0D);
            x0 = yE[0].times(this.xData[0][1]-this.xData[0][0]);
            x0 = x0.over(yE[1].minus(yE[0]));
            x0 = ErrorProp.minus(this.xData[0][0],x0);
            // if the first point is already near the peak, halve the extrapolation
            if(this.yData[0]>=0.9D*yPeak)x0=(x0.plus(this.xData[0][0])).over(2.0D);
            if(x0.getValue()<0.0D)x0.reset(0.0D, 0.0D);
            // triangular area between x0 and the first point
            cumYe[0] = yE[0].over(2.0D);
            cumYe[0] = cumYe[0].times(ErrorProp.minus(this.xData[0][0], x0));
        }
        else{
            cumYe[0].reset(0.0D, this.weight[0]);
        }
    }
    else{
        cumYe[0].reset(0.0D, this.weight[0]);

    }

    // cumulative calculation for rest of the points (trapezium approximation)
    for(int i=1; i<this.nData; i++){
        cumYe[i] = yE[i].plus(yE[i-1]);
        cumYe[i] = cumYe[i].over(2.0D);
        cumYe[i] = cumYe[i].times(this.xData[0][i]-this.xData[0][i-1]);
        cumYe[i] = cumYe[i].plus(cumYe[i-1]);
    }

    // check on shape of data for final step of cumulative calculation:
    // if the last point is on a falling edge, extrapolate forward and add the
    // remaining tail area to the total
    ErrorProp cumYtotal = cumYe[this.nData-1].copy();
    if(peaki==this.nData-1){
        // peak at the end: assume the distribution is only half covered
        cumYtotal = cumYtotal.times(2.0D);
    }
    else{
        if(this.yData[this.nData-1]<yData[this.nData-2]*0.5D && yData[this.nData-1]>distribMode*0.02D){
            ErrorProp xn = new ErrorProp();
            xn = yE[this.nData-1].times(this.xData[0][this.nData-2]-this.xData[0][this.nData-1]);
            xn = xn.over(yE[this.nData-2].minus(yE[this.nData-1]));
            xn = ErrorProp.minus(this.xData[0][this.nData-1], xn);
            // NOTE(review): this mirrors the first-point test above but checks
            // yData[0]; it possibly should check yData[this.nData-1] — confirm
            if(this.yData[0]>=0.9D*yPeak)xn=(xn.plus(this.xData[0][this.nData-1])).over(2.0D);
            cumYtotal = cumYtotal.plus(ErrorProp.times(0.5D,(yE[this.nData-1].times(xn.minus(this.xData[0][this.nData-1])))));
        }
    }

    // Fill cumulative Y and W arrays
    for(int i=0; i<this.nData; i++){
        cumY[i]=cumYe[i].getValue();
        cumW[i]=cumYe[i].getError();
    }

    // estimate y scaling factor and normalise the cumulative values to [0,1]
    double yScale = 1.0D/cumYtotal.getValue();
    for(int i=0; i<this.nData; i++){
        cumYe[i]=cumYe[i].over(cumYtotal);
    }

    // check for zero and negative values: replace each offending point with the
    // next strictly positive cumulative value (leading run) or the average of
    // its neighbours (interior), so the later log transforms are defined
    int jj = 0;
    boolean test = true;
    for(int i=0; i<this.nData; i++){
        if(cumYe[i].getValue()<=0.0D){
            if(i<=jj){
                // still inside the leading run of non-positive values
                test=true;
                jj = i;
                while(test){
                    jj++;
                    if(jj>=this.nData)throw new ArithmeticException("all zero cumulative data!!");
                    if(cumYe[jj].getValue()>0.0D){
                        cumYe[i]=cumYe[jj].copy();
                        cumX[i]=cumX[jj];
                        test=false;
                    }
                }
            }
            else{
                if(i==this.nData-1){
                    cumYe[i]=cumYe[i-1].copy();
                    cumX[i]=cumX[i-1];
                }
                else{
                    cumYe[i]=cumYe[i-1].plus(cumYe[i+1]);
                    cumYe[i]=cumYe[i].over(2.0D);
                    cumX[i]=(cumX[i-1]+cumX[i+1])/2.0D;
                }
            }
        }
    }

    // check for unity value: mirror of the zero check, scanning from the end,
    // replacing values >= 1 so that log(log(1/F)) stays finite
    jj = this.nData-1;
    for(int i=this.nData-1; i>=0; i--){
        if(cumYe[i].getValue()>=1.0D){
            if(i>=jj){
                // still inside the trailing run of >= 1 values
                test=true;
                jj = this.nData-1;
                while(test){
                    jj--;
                    if(jj<0)throw new ArithmeticException("all unity cumulative data!!");
                    if(cumYe[jj].getValue()<1.0D){
                        cumYe[i]=cumYe[jj].copy();
                        cumX[i]=cumX[jj];
                        test=false;
                    }
                }
            }
            else{
                if(i==0){
                    cumYe[i]=cumYe[i+1].copy();
                    cumX[i]=cumX[i+1];
                }
                else{
                    cumYe[i]=cumYe[i-1].plus(cumYe[i+1]);
                    cumYe[i]=cumYe[i].over(2.0D);
                    cumX[i]=(cumX[i-1]+cumX[i+1])/2.0D;
                }
            }
        }
    }

    return yScale;
}
|
---|
7511 |
|
---|
// Fit the data to a three-parameter Weibull distribution (mu, sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void weibull(){
    this.fitWeibull(0, 0);
}
|
---|
7515 |
|
---|
// Fit the data to a three-parameter Weibull distribution (mu, sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void weibullPlot(){
    this.fitWeibull(1, 0);
}
|
---|
7519 |
|
---|
// Fit the data to a two-parameter Weibull distribution (sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void weibullTwoPar(){
    this.fitWeibull(0, 1);
}
|
---|
7523 |
|
---|
// Fit the data to a two-parameter Weibull distribution (sigma, gamma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void weibullTwoParPlot(){
    this.fitWeibull(1, 1);
}
|
---|
7527 |
|
---|
// Fit the data to a standard Weibull distribution (gamma only,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void weibullStandard(){
    this.fitWeibull(0, 2);
}
|
---|
7531 |
|
---|
// Fit the data to a standard Weibull distribution (gamma only,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void weibullStandardPlot(){
    this.fitWeibull(1, 2);
}
|
---|
7535 |
|
---|
// Set up a Weibull fit and delegate to the shared Frechet/Weibull fitter.
// allTest  - 1: print and plot the results; 0: fit only
// typeFlag - 0: three-parameter (mu, sigma, gamma); 1: two-parameter
//            (sigma, gamma); 2: standard (gamma only); an ordinate scaling
//            factor is included in nTerms unless scaleFlag is false
protected void fitWeibull(int allTest, int typeFlag){
    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.userSupplied = false;
    // select the regression method identifier and the number of fitted terms
    switch(typeFlag){
        case 0: this.lastMethod=16;
                this.nTerms=4;
                break;
        case 1: this.lastMethod=17;
                this.nTerms=3;
                break;
        case 2: this.lastMethod=18;
                this.nTerms=2;
                break;
    }
    // drop the ordinate scaling factor if it has been fixed by the user
    if(!this.scaleFlag)this.nTerms=this.nTerms-1;
    this.frechetWeibull=false;
    this.fitFrechetWeibull(allTest, typeFlag);
}
|
---|
7554 |
|
---|
// Fit the data to a minimum-type Gumbel distribution (mu, sigma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void gumbelMin(){
    this.fitGumbel(0, 0);
}
|
---|
7558 |
|
---|
// Fit the data to a minimum-type Gumbel distribution (mu, sigma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void gumbelMinPlot(){
    this.fitGumbel(1, 0);
}
|
---|
7562 |
|
---|
// Fit the data to a maximum-type Gumbel distribution (mu, sigma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void gumbelMax(){
    this.fitGumbel(0, 1);
}
|
---|
// Fit the data to a maximum-type Gumbel distribution (mu, sigma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void gumbelMaxPlot(){
    this.fitGumbel(1, 1);
}
|
---|
7569 |
|
---|
// Fit the data to a one-parameter minimum-type Gumbel distribution (sigma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void gumbelMinOnePar(){
    this.fitGumbel(0, 2);
}
|
---|
7573 |
|
---|
// Fit the data to a one-parameter minimum-type Gumbel distribution (sigma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void gumbelMinOneParPlot(){
    this.fitGumbel(1, 2);
}
|
---|
7577 |
|
---|
// Fit the data to a one-parameter maximum-type Gumbel distribution (sigma,
// plus an ordinate scaling factor when scaleFlag is set); no print or plot.
public void gumbelMaxOnePar(){
    this.fitGumbel(0, 3);
}
|
---|
7581 |
|
---|
// Fit the data to a one-parameter maximum-type Gumbel distribution (sigma,
// plus an ordinate scaling factor when scaleFlag is set); print and plot the results.
public void gumbelMaxOneParPlot(){
    this.fitGumbel(1, 3);
}
|
---|
7585 |
|
---|
// Fit the data to a minimal standard Gumbel distribution (ordinate scaling
// factor only, when scaleFlag is set); no print or plot.
public void gumbelMinStandard(){
    this.fitGumbel(0, 4);
}
|
---|
7589 |
|
---|
// Fit the data to a minimal standard Gumbel distribution (ordinate scaling
// factor only, when scaleFlag is set); print and plot the results.
public void gumbelMinStandardPlot(){
    this.fitGumbel(1, 4);
}
|
---|
7593 |
|
---|
// Fit the data to a maximal standard Gumbel distribution (ordinate scaling
// factor only, when scaleFlag is set); no print or plot.
public void gumbelMaxStandard(){
    this.fitGumbel(0, 5);
}
|
---|
7597 |
|
---|
// Fit the data to a maximal standard Gumbel distribution (ordinate scaling
// factor only, when scaleFlag is set); print and plot the results.
public void gumbelMaxStandardPlot(){
    this.fitGumbel(1, 5);
}
|
---|
7601 |
|
---|
7602 | // No parameters set for estimation
|
---|
7603 | // Correlation coefficient and plot
|
---|
7604 | protected void noParameters(String ss){
|
---|
7605 | System.out.println(ss+" Regression");
|
---|
7606 | System.out.println("No parameters set for estimation");
|
---|
7607 | System.out.println("Theoretical curve obtained");
|
---|
7608 | String filename1="RegressOutput.txt";
|
---|
7609 | String filename2="RegressOutputN.txt";
|
---|
7610 | FileOutput fout = new FileOutput(filename1, 'n');
|
---|
7611 | System.out.println("Results printed to the file "+filename2);
|
---|
7612 | fout.dateAndTimeln(filename1);
|
---|
7613 | fout.println("No parameters set for estimation");
|
---|
7614 | switch(this.lastMethod){
|
---|
7615 | case 11: fout.println("Minimal Standard Gumbel p(x) = exp(x)exp(-exp(x))");
|
---|
7616 | for(int i=0; i<this.nData; i++)this.yCalc[i]=Math.exp(this.xData[0][i])*Math.exp(-Math.exp(this.xData[0][i]));
|
---|
7617 | break;
|
---|
7618 | case 12: fout.println("Maximal Standard Gumbel p(x) = exp(-x)exp(-exp(-x))");
|
---|
7619 | for(int i=0; i<this.nData; i++)this.yCalc[i]=Math.exp(-this.xData[0][i])*Math.exp(-Math.exp(-this.xData[0][i]));
|
---|
7620 | break;
|
---|
7621 | case 21: fout.println("Standard Exponential p(x) = exp(-x)");
|
---|
7622 | for(int i=0; i<this.nData; i++)this.yCalc[i]=Math.exp(-this.xData[0][i]);
|
---|
7623 | break;
|
---|
7624 | }
|
---|
7625 | this.sumOfSquaresError = 0.0D;
|
---|
7626 | this.chiSquare = 0.0D;
|
---|
7627 | double temp = 0.0D;
|
---|
7628 | for(int i=0; i<this.nData; i++){
|
---|
7629 | temp = Fmath.square(this.yData[i]-this.yCalc[i]);
|
---|
7630 | this.sumOfSquaresError += temp;
|
---|
7631 | this.chiSquare += temp/Fmath.square(this.weight[i]);
|
---|
7632 | }
|
---|
7633 | double corrCoeff = Stat.corrCoeff(this.yData, this.yCalc);
|
---|
7634 | fout.printtab("Correlation Coefficient");
|
---|
7635 | fout.println(Fmath.truncate(corrCoeff, this.prec));
|
---|
7636 | fout.printtab("Correlation Coefficient Probability");
|
---|
7637 | fout.println(Fmath.truncate(1.0D-Stat.linearCorrCoeffProb(corrCoeff, this.degreesOfFreedom-1), this.prec));
|
---|
7638 |
|
---|
7639 | fout.printtab("Sum of Squares");
|
---|
7640 | fout.println(Fmath.truncate(this.sumOfSquaresError, this.prec));
|
---|
7641 | if(this.weightOpt || this.trueFreq){
|
---|
7642 | fout.printtab("Chi Square");
|
---|
7643 | fout.println(Fmath.truncate(this.chiSquare, this.prec));
|
---|
7644 | fout.printtab("chi square probability");
|
---|
7645 | fout.println(Fmath.truncate(Stat.chiSquareProb(this.chiSquare, this.degreesOfFreedom-1), this.prec));
|
---|
7646 | }
|
---|
7647 | fout.println(" ");
|
---|
7648 |
|
---|
7649 | fout.printtab("x", this.field);
|
---|
7650 | fout.printtab("p(x) [expl]", this.field);
|
---|
7651 | fout.printtab("p(x) [calc]", this.field);
|
---|
7652 | fout.println("residual");
|
---|
7653 |
|
---|
7654 | for(int i=0; i<this.nData; i++){
|
---|
7655 | fout.printtab(Fmath.truncate(this.xData[0][i], this.prec), this.field);
|
---|
7656 | fout.printtab(Fmath.truncate(this.yData[i], this.prec), this.field);
|
---|
7657 | fout.printtab(Fmath.truncate(this.yCalc[i], this.prec), this.field);
|
---|
7658 | fout.println(Fmath.truncate(this.yData[i]-this.yCalc[i], this.prec));
|
---|
7659 | }
|
---|
7660 | fout.close();
|
---|
7661 | this.plotXY();
|
---|
7662 | if(!this.supressYYplot)this.plotYY();
|
---|
7663 | }
|
---|
7664 |
|
---|
7665 | protected void fitGumbel(int allTest, int typeFlag){
|
---|
7666 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
7667 | this.userSupplied = false;
|
---|
7668 | switch(typeFlag){
|
---|
7669 | case 0: this.lastMethod=7;
|
---|
7670 | this.nTerms=3;
|
---|
7671 | break;
|
---|
7672 | case 1: this.lastMethod=8;
|
---|
7673 | this.nTerms=3;
|
---|
7674 | break;
|
---|
7675 | case 2: this.lastMethod=9;
|
---|
7676 | this.nTerms=2;
|
---|
7677 | break;
|
---|
7678 | case 3: this.lastMethod=10;
|
---|
7679 | this.nTerms=2;
|
---|
7680 | break;
|
---|
7681 | case 4: this.lastMethod=11;
|
---|
7682 | this.nTerms=1;
|
---|
7683 | break;
|
---|
7684 | case 5: this.lastMethod=12;
|
---|
7685 | this.nTerms=1;
|
---|
7686 | break;
|
---|
7687 | }
|
---|
7688 | if(!this.scaleFlag)this.nTerms=this.nTerms-1;
|
---|
7689 | this.zeroCheck = false;
|
---|
7690 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
7691 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
7692 | if(this.nTerms==0){
|
---|
7693 | this.noParameters("Gumbel");
|
---|
7694 | }
|
---|
7695 | else{
|
---|
7696 |
|
---|
7697 |
|
---|
7698 | // order data into ascending order of the abscissae
|
---|
7699 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
7700 |
|
---|
7701 | // check sign of y data
|
---|
7702 | Double tempd=null;
|
---|
7703 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
7704 | tempd = (Double)retY.get(4);
|
---|
7705 | double yPeak = tempd.doubleValue();
|
---|
7706 | boolean yFlag = false;
|
---|
7707 |
|
---|
7708 | if(yPeak<0.0D){
|
---|
7709 | System.out.println("Regression.fitGumbel(): This implementation of the Gumbel distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
7710 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
7711 | for(int i =0; i<this.nData; i++){
|
---|
7712 | yData[i] = -yData[i];
|
---|
7713 | }
|
---|
7714 | retY = Regression.dataSign(yData);
|
---|
7715 | yFlag=true;
|
---|
7716 | }
|
---|
7717 |
|
---|
7718 | // check x data
|
---|
7719 | ArrayList<Object> retX = Regression.dataSign(xData[0]);
|
---|
7720 | Integer tempi = null;
|
---|
7721 |
|
---|
7722 | // Calculate x value at peak y (estimate of the 'distribution mode')
|
---|
7723 | tempi = (Integer)retY.get(5);
|
---|
7724 | int peaki = tempi.intValue();
|
---|
7725 | double distribMode = xData[0][peaki];
|
---|
7726 |
|
---|
7727 | // Calculate an estimate of the half-height width
|
---|
7728 | double sd = halfWidth(xData[0], yData);
|
---|
7729 |
|
---|
7730 | // Nelder and Mead Simplex Regression for Gumbel
|
---|
7731 | // Fill arrays needed by the Simplex
|
---|
7732 | double[] start = new double[this.nTerms];
|
---|
7733 | double[] step = new double[this.nTerms];
|
---|
7734 | switch(typeFlag){
|
---|
7735 | case 0:
|
---|
7736 | case 1:
|
---|
7737 | start[0] = distribMode; //mu
|
---|
7738 | start[1] = sd*Math.sqrt(6.0D)/Math.PI; //sigma
|
---|
7739 | if(this.scaleFlag){
|
---|
7740 | start[2] = yPeak*start[1]*Math.exp(1); //y axis scaling factor
|
---|
7741 | }
|
---|
7742 | step[0] = 0.1D*start[0];
|
---|
7743 | if(step[0]==0.0D){
|
---|
7744 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
7745 | Double tempdd = null;
|
---|
7746 | tempdd = (Double)ret0.get(2);
|
---|
7747 | double xmax = tempdd.doubleValue();
|
---|
7748 | if(xmax==0.0D){
|
---|
7749 | tempdd = (Double)ret0.get(0);
|
---|
7750 | xmax = tempdd.doubleValue();
|
---|
7751 | }
|
---|
7752 | step[0]=xmax*0.1D;
|
---|
7753 | }
|
---|
7754 | step[1] = 0.1D*start[1];
|
---|
7755 | if(this.scaleFlag)step[2] = 0.1D*start[2];
|
---|
7756 |
|
---|
7757 | // Add constraints
|
---|
7758 | this.addConstraint(1,-1,0.0D);
|
---|
7759 | break;
|
---|
7760 | case 2:
|
---|
7761 | case 3:
|
---|
7762 | start[0] = sd*Math.sqrt(6.0D)/Math.PI; //sigma
|
---|
7763 | if(this.scaleFlag){
|
---|
7764 | start[1] = yPeak*start[0]*Math.exp(1); //y axis scaling factor
|
---|
7765 | }
|
---|
7766 | step[0] = 0.1D*start[0];
|
---|
7767 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
7768 | // Add constraints
|
---|
7769 | this.addConstraint(0,-1,0.0D);
|
---|
7770 | break;
|
---|
7771 | case 4:
|
---|
7772 | case 5:
|
---|
7773 | if(this.scaleFlag){
|
---|
7774 | start[0] = yPeak*Math.exp(1); //y axis scaling factor
|
---|
7775 | step[0] = 0.1D*start[0];
|
---|
7776 | }
|
---|
7777 | break;
|
---|
7778 | }
|
---|
7779 |
|
---|
7780 | // Create instance of Gumbel function
|
---|
7781 | GumbelFunction ff = new GumbelFunction();
|
---|
7782 |
|
---|
7783 | // Set minimum type / maximum type option
|
---|
7784 | ff.typeFlag = typeFlag;
|
---|
7785 |
|
---|
7786 | // Set ordinate scaling option
|
---|
7787 | ff.scaleOption = this.scaleFlag;
|
---|
7788 | ff.scaleFactor = this.yScaleFactor;
|
---|
7789 |
|
---|
7790 | if(typeFlag<4){
|
---|
7791 |
|
---|
7792 | // Perform simplex regression
|
---|
7793 | Object regFun3 = (Object)ff;
|
---|
7794 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
7795 |
|
---|
7796 | if(allTest==1){
|
---|
7797 | // Print results
|
---|
7798 | if(!this.supressPrint)this.print();
|
---|
7799 |
|
---|
7800 | // Plot results
|
---|
7801 | int flag = this.plotXY(ff);
|
---|
7802 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
7803 | }
|
---|
7804 | }
|
---|
7805 | else{
|
---|
7806 | // calculate exp exp term
|
---|
7807 | double[][] xxx = new double[1][this.nData];
|
---|
7808 | double aa=1.0D;
|
---|
7809 | if(typeFlag==5)aa=-1.0D;
|
---|
7810 | for(int i=0; i<this.nData; i++){
|
---|
7811 | xxx[0][i]=Math.exp(aa*this.xData[0][i])*Math.exp(-Math.exp(aa*this.xData[0][i]));
|
---|
7812 | }
|
---|
7813 |
|
---|
7814 | // perform linear regression
|
---|
7815 | this.linNonLin = true;
|
---|
7816 | this.generalLinear(xxx);
|
---|
7817 |
|
---|
7818 | if(!this.supressPrint)this.print();
|
---|
7819 | if(!this.supressYYplot)this.plotYY();
|
---|
7820 | this.plotXY();
|
---|
7821 |
|
---|
7822 | this.linNonLin = false;
|
---|
7823 |
|
---|
7824 | }
|
---|
7825 |
|
---|
7826 | if(yFlag){
|
---|
7827 | // restore data
|
---|
7828 | for(int i=0; i<this.nData-1; i++){
|
---|
7829 | this.yData[i]=-this.yData[i];
|
---|
7830 | }
|
---|
7831 | }
|
---|
7832 | }
|
---|
7833 | }
|
---|
7834 |
|
---|
7835 | // sort elements x, y and w arrays of doubles into ascending order of the x array
|
---|
7836 | // using selection sort method
|
---|
7837 | protected static void sort(double[] x, double[] y, double[] w){
|
---|
7838 | int index = 0;
|
---|
7839 | int lastIndex = -1;
|
---|
7840 | int n = x.length;
|
---|
7841 | double holdx = 0.0D;
|
---|
7842 | double holdy = 0.0D;
|
---|
7843 | double holdw = 0.0D;
|
---|
7844 |
|
---|
7845 | while(lastIndex < n-1){
|
---|
7846 | index = lastIndex+1;
|
---|
7847 | for(int i=lastIndex+2; i<n; i++){
|
---|
7848 | if(x[i]<x[index]){
|
---|
7849 | index=i;
|
---|
7850 | }
|
---|
7851 | }
|
---|
7852 | lastIndex++;
|
---|
7853 | holdx=x[index];
|
---|
7854 | x[index]=x[lastIndex];
|
---|
7855 | x[lastIndex]=holdx;
|
---|
7856 | holdy=y[index];
|
---|
7857 | y[index]=y[lastIndex];
|
---|
7858 | y[lastIndex]=holdy;
|
---|
7859 | holdw=w[index];
|
---|
7860 | w[index]=w[lastIndex];
|
---|
7861 | w[lastIndex]=holdw;
|
---|
7862 | }
|
---|
7863 | }
|
---|
7864 |
|
---|
7865 | // returns rough estimate of half-height width
|
---|
7866 | protected static double halfWidth(double[] xData, double[] yData){
|
---|
7867 | // Find index of maximum value and calculate half maximum height
|
---|
7868 | double ymax = yData[0];
|
---|
7869 | int imax = 0;
|
---|
7870 | int n = xData.length;
|
---|
7871 |
|
---|
7872 | for(int i=1; i<n; i++){
|
---|
7873 | if(yData[i]>ymax){
|
---|
7874 | ymax=yData[i];
|
---|
7875 | imax=i;
|
---|
7876 | }
|
---|
7877 | }
|
---|
7878 | ymax /= 2.0D;
|
---|
7879 |
|
---|
7880 | // Find index of point at half maximum value on the low side of the maximum
|
---|
7881 | double halfXlow=-1.0D;
|
---|
7882 | double halfYlow=-1.0D;
|
---|
7883 | double temp = -1.0D;
|
---|
7884 | int ihl=-1;
|
---|
7885 | if(imax>0){
|
---|
7886 | ihl=imax-1;
|
---|
7887 | halfYlow=Math.abs(ymax-yData[ihl]);
|
---|
7888 | for(int i=imax-2; i>=0; i--){
|
---|
7889 | temp=Math.abs(ymax-yData[i]);
|
---|
7890 | if(temp<halfYlow){
|
---|
7891 | halfYlow=temp;
|
---|
7892 | ihl=i;
|
---|
7893 | }
|
---|
7894 | }
|
---|
7895 | halfXlow=Math.abs(xData[ihl]-xData[imax]);
|
---|
7896 | }
|
---|
7897 |
|
---|
7898 | // Find index of point at half maximum value on the high side of the maximum
|
---|
7899 | double halfXhigh=-1.0D;
|
---|
7900 | double halfYhigh=-1.0D;
|
---|
7901 | temp = -1.0D;
|
---|
7902 | int ihh=-1;
|
---|
7903 | if(imax<n-1){
|
---|
7904 | ihh=imax+1;
|
---|
7905 | halfYhigh=Math.abs(ymax-yData[ihh]);
|
---|
7906 | for(int i=imax+2; i<n; i++){
|
---|
7907 | temp=Math.abs(ymax-yData[i]);
|
---|
7908 | if(temp<halfYhigh){
|
---|
7909 | halfYhigh=temp;
|
---|
7910 | ihh=i;
|
---|
7911 | }
|
---|
7912 | }
|
---|
7913 | halfXhigh=Math.abs(xData[ihh]-xData[imax]);
|
---|
7914 | }
|
---|
7915 |
|
---|
7916 | // Calculate width at half height
|
---|
7917 | double halfw = 0.0D;
|
---|
7918 | if(ihl!=-1)halfw += halfXlow;
|
---|
7919 | if(ihh!=-1)halfw += halfXhigh;
|
---|
7920 |
|
---|
7921 | return halfw;
|
---|
7922 | }
|
---|
7923 |
|
---|
7924 | // FIT TO A SIMPLE EXPOPNENTIAL
|
---|
7925 |
|
---|
7926 | // method for fitting data to a simple exponential
|
---|
7927 | public void exponentialSimple(){
|
---|
7928 | fitsexponentialSimple(0);
|
---|
7929 | }
|
---|
7930 |
|
---|
7931 | // method for fitting data to a simple exponential
|
---|
7932 | public void exponentialSimplePlot(){
|
---|
7933 | fitsexponentialSimple(1);
|
---|
7934 | }
|
---|
7935 |
|
---|
    // Fits the data to a simple exponential (two parameters when the ordinate
    // scale factor is fitted, one otherwise).
    // plotFlag: 0 = fit only, 1 = also print the results and plot the fit.
    // Initial estimates are obtained from a linear regression on log-transformed
    // data, then refined by a Nelder-Mead simplex minimisation.
    protected void fitsexponentialSimple(int plotFlag){

        if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
        this.lastMethod=43;
        this.userSupplied = false;
        this.linNonLin = false;
        this.zeroCheck = false;
        this.nTerms=2;
        // with a fixed ordinate scale factor only the exponent is fitted
        if(!this.scaleFlag)this.nTerms=1;
        this.degreesOfFreedom=this.nData - this.nTerms;
        if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

        // order data into ascending order of the abscissae
        Regression.sort(this.xData[0], this.yData, this.weight);

        // Estimate of yscale and A - linear transform
        // Points with non-positive x or y cannot be log-transformed and are
        // excluded from the preliminary linear fit.
        int nLen = this.yData.length;
        int nLin = nLen;
        boolean[] zeros = new boolean[nLen];
        for(int i=0; i<nLen; i++){
            zeros[i] = true;
            if(this.xData[0][i]<=0.0D||this.yData[i]<=0.0D){
                zeros[i] = false;
                nLin--;
            }
        }
        double[] xlin = new double[nLin];
        double[] ylin = new double[nLin];
        double[] wlin = new double[nLin];
        int counter = 0;
        for(int i=0; i<nLen; i++){
            if(zeros[i]){
                // NOTE(review): both x and y are log-transformed here, which is
                // the linearisation of a power law, not of y = A.exp(B.x)
                // (that would use x against log y) - verify against
                // ExponentialSimpleFunction's functional form
                xlin[counter] = Math.log(this.xData[0][i]);
                ylin[counter] = Math.log(this.yData[i]);
                // weight propagated through the log transform: dw = w/y
                wlin[counter] = Math.abs(this.weight[i]/this.yData[i]);
                counter++;
            }
        }

        // preliminary straight-line fit on the transformed data supplies the
        // simplex starting point and step sizes
        Regression reglin = new Regression(xlin, ylin, wlin);
        double[] start = new double[nTerms];
        double[] step = new double[nTerms];
        if(this.scaleFlag){
            reglin.linear();
            double[] coeff = reglin.getBestEstimates();
            double[] errrs = reglin.getBestEstimatesErrors();

            // initial estimates: slope -> exponent, exp(intercept) -> scale
            start[0] = coeff[1];
            start[1] = Math.exp(coeff[0]);

            // initial step sizes: half the estimate's standard error, falling
            // back to 10% of the estimate when that is zero or NaN
            step[0] = errrs[1]/2.0;
            step[1] = errrs[0]*start[0]/2.0;
            if(step[0]<=0.0 || Fmath.isNaN(step[0]))step[0] = Math.abs(start[0]*0.1);
            if(step[1]<=0.0 || Fmath.isNaN(step[1]))step[1] = Math.abs(start[1]*0.1);
        }
        else{
            reglin.linearGeneral();
            double[] coeff = reglin.getBestEstimates();
            double[] errrs = reglin.getBestEstimatesErrors();

            // initial estimates (exponent only; scale factor is fixed)
            start[0] = coeff[1];

            // initial step sizes
            step[0] = errrs[1]/2.0;
            if(step[0]<=0.0 || Fmath.isNaN(step[0]))step[0] = Math.abs(start[0]*0.1);
        }

        // Nelder and Mead Simplex Regression on the untransformed data
        ExponentialSimpleFunction f = new ExponentialSimpleFunction();
        f.scaleOption = this.scaleFlag;
        f.scaleFactor = this.yScaleFactor;
        Object regFun2 = (Object)f;
        this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

        if(plotFlag==1){
            // Print results
            if(!this.supressPrint)this.print();

            // Plot results
            int flag = this.plotXY(f);
            if(flag!=-2 && !this.supressYYplot)this.plotYY();
        }
    }
|
---|
8023 |
|
---|
8024 |
|
---|
8025 | // FIT TO MULTIPLE EXPOPNENTIALS
|
---|
8026 |
|
---|
8027 | // method for fitting data to mutiple exponentials
|
---|
8028 | // initial estimates calculated internally
|
---|
8029 | public void exponentialMultiple(int nExps){
|
---|
8030 | this.userSupplied = false;
|
---|
8031 | fitsexponentialMultiple(nExps,0);
|
---|
8032 | }
|
---|
8033 |
|
---|
8034 | // method for fitting data to a multiple exponentials
|
---|
8035 | // initial estimates calculated internally
|
---|
8036 | public void exponentialMultiplePlot(int nExps){
|
---|
8037 | this.userSupplied = false;
|
---|
8038 | fitsexponentialMultiple(nExps, 1);
|
---|
8039 | }
|
---|
8040 |
|
---|
8041 | // method for fitting data to mutiple exponentials
|
---|
8042 | // user supplied initial estimates
|
---|
8043 | public void exponentialMultiple(int nExps, double[] AandBs){
|
---|
8044 | this.userSupplied = true;
|
---|
8045 | fitsexponentialMultiple(nExps, 0, AandBs);
|
---|
8046 | }
|
---|
8047 |
|
---|
8048 | // method for fitting data to a multiple exponentials
|
---|
8049 | // user supplied initial estimates
|
---|
8050 | public void exponentialMultiplePlot(int nExps, double[] AandBs){
|
---|
8051 | this.userSupplied = true;
|
---|
8052 | fitsexponentialMultiple(nExps, 1, AandBs);
|
---|
8053 | }
|
---|
8054 |
|
---|
    // Fits the data to a sum of nExps exponentials (2*nExps parameters:
    // alternating pre-exponential factor and exponent).
    // plotFlag: 0 = fit only, 1 = also print the results and plot the fit.
    // Initial estimates are obtained internally from a linear regression on
    // log-transformed data, then refined by a Nelder-Mead simplex minimisation.
    protected void fitsexponentialMultiple(int nExps, int plotFlag){

        if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
        this.lastMethod=44;
        this.linNonLin = false;
        this.zeroCheck = false;
        // two parameters (A, B) per exponential
        this.nTerms=2*nExps;
        this.degreesOfFreedom=this.nData - this.nTerms;
        if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

        // order data into ascending order of the abscissae
        Regression.sort(this.xData[0], this.yData, this.weight);

        // Estimate of yscale and A - linear transform
        // Points with non-positive x or y cannot be log-transformed and are
        // excluded from the preliminary linear fit.
        int nLen = this.yData.length;
        int nLin = nLen;
        boolean[] zeros = new boolean[nLen];
        for(int i=0; i<nLen; i++){
            zeros[i] = true;
            if(this.xData[0][i]<=0.0D||this.yData[i]<=0.0D){
                zeros[i] = false;
                nLin--;
            }
        }
        double[] xlin = new double[nLin];
        double[] ylin = new double[nLin];
        double[] wlin = new double[nLin];
        int counter = 0;
        for(int i=0; i<nLen; i++){
            if(zeros[i]){
                // NOTE(review): both x and y are log-transformed here (a
                // power-law linearisation); for y = sum A.exp(B.x) the usual
                // transform is x against log y - verify intent
                xlin[counter] = Math.log(this.xData[0][i]);
                ylin[counter] = Math.log(this.yData[i]);
                // weight propagated through the log transform: dw = w/y
                wlin[counter] = Math.abs(this.weight[i]/this.yData[i]);
                counter++;
            }
        }

        // preliminary straight-line fit supplies simplex start/step values
        Regression reglin = new Regression(xlin, ylin, wlin);
        double[] start = new double[nTerms];
        double[] step = new double[nTerms];

        reglin.linear();
        double[] coeff = reglin.getBestEstimates();
        double[] errrs = reglin.getBestEstimatesErrors();

        // all exponentials start from the same estimate, with the amplitude
        // split evenly across the terms
        for(int i=0; i<this.nTerms; i+=2){
            // initial estimates
            start[i] = Math.exp(coeff[0])/this.nTerms;
            start[i+1] = coeff[1];

            // initial step sizes: half the standard error, with a 10%-of-start
            // fallback when that is zero or NaN
            step[i] = errrs[0]*start[i]/2.0;
            step[i+1] = errrs[1]/2.0;
            if(step[i]<=0.0 || Fmath.isNaN(step[i]))step[i] = Math.abs(start[i]*0.1);
            if(step[i+1]<=0.0 || Fmath.isNaN(step[i+1]))step[i+1] = Math.abs(start[i+1]*0.1);
        }

        // Nelder and Mead Simplex Regression
        ExponentialMultipleFunction f = new ExponentialMultipleFunction();
        // NOTE(review): nExps is set to this.nTerms (= 2*nExps), i.e. the
        // parameter count rather than the number of exponentials - confirm
        // that ExponentialMultipleFunction expects the parameter count
        f.nExps = this.nTerms;
        Object regFun2 = (Object)f;
        this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

        if(plotFlag==1){
            // Print results
            if(!this.supressPrint)this.print();

            // Plot results
            int flag = this.plotXY(f);
            if(flag!=-2 && !this.supressYYplot)this.plotYY();
        }
    }
|
---|
8129 |
|
---|
8130 | // method for fitting data to a multiple exponentials
|
---|
8131 | // user supplied initial estimates calculated
|
---|
8132 | protected void fitsexponentialMultiple(int nExps, int plotFlag, double[] aAndBs){
|
---|
8133 |
|
---|
8134 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
8135 | this.lastMethod=44;
|
---|
8136 | this.linNonLin = false;
|
---|
8137 | this.zeroCheck = false;
|
---|
8138 | this.nTerms=2*nExps;
|
---|
8139 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
8140 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
8141 |
|
---|
8142 | // order data into ascending order of the abscissae
|
---|
8143 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
8144 |
|
---|
8145 | double[] start = new double[nTerms];
|
---|
8146 | double[] step = new double[nTerms];
|
---|
8147 |
|
---|
8148 | for(int i=0; i<this.nTerms; i+=2){
|
---|
8149 | // initial estimates
|
---|
8150 | start[i] = aAndBs[i];
|
---|
8151 |
|
---|
8152 | // initial step sizes
|
---|
8153 | step[i] = Math.abs(start[i]*0.1);
|
---|
8154 | }
|
---|
8155 |
|
---|
8156 | // Nelder and Mead Simplex Regression
|
---|
8157 | ExponentialMultipleFunction f = new ExponentialMultipleFunction();
|
---|
8158 | f.nExps = this.nTerms;
|
---|
8159 | Object regFun2 = (Object)f;
|
---|
8160 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
8161 |
|
---|
8162 | if(plotFlag==1){
|
---|
8163 | // Print results
|
---|
8164 | if(!this.supressPrint)this.print();
|
---|
8165 |
|
---|
8166 | // Plot results
|
---|
8167 | int flag = this.plotXY(f);
|
---|
8168 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
8169 | }
|
---|
8170 | }
|
---|
8171 |
|
---|
8172 | // FIT TO ONE MINUS A SIMPLE EXPOPNENTIAL
|
---|
8173 |
|
---|
8174 | // method for fitting data to 1 - exponential
|
---|
8175 | public void oneMinusExponential(){
|
---|
8176 | fitsoneMinusExponential(0);
|
---|
8177 | }
|
---|
8178 |
|
---|
8179 | // method for fitting data to 1 - exponential
|
---|
8180 | public void oneMinusExponentialPlot(){
|
---|
8181 | fitsoneMinusExponential(1);
|
---|
8182 | }
|
---|
8183 |
|
---|
    // Fits the data to a 1 - exponential form (two parameters: amplitude A and
    // rate B).
    // plotFlag: 0 = fit only, 1 = also print the results and plot the fit.
    // Strategy: estimate a half-rise time from the data, run the simplex from
    // (A, B), rerun from (-A, -B), and if the second run is worse, rerun once
    // more from the better (first) estimates with tighter steps.
    protected void fitsoneMinusExponential(int plotFlag){

        if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
        this.lastMethod=45;
        this.userSupplied = false;
        this.linNonLin = false;
        this.zeroCheck = false;
        this.nTerms=2;
        this.degreesOfFreedom=this.nData - this.nTerms;
        if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

        // order data into ascending order of the abscissae
        Regression.sort(this.xData[0], this.yData, this.weight);

        // initial step sizes
        ArrayMaths am = new ArrayMaths(this.yData);
        double maxY = am.maximum();
        double minY = am.minimum();
        double testDirection = 1.0;
        double maxYhalf = maxY/2.0;

        // if the data descend (largest magnitude is negative) flip the search
        // direction and work with the minimum instead
        if(Math.abs(minY)>Math.abs(maxY)){
            testDirection = -1.0;
            maxY = minY;
            maxYhalf = minY/2.0;
        }
        // scan for the x offset at which y crosses half its extreme value
        // (the half-rise time); NaN marks "not found"
        double timeHalf = Double.NaN;
        boolean test = true;
        int ii=0;
        while(test){
            if(this.yData[ii]==maxYhalf){
                // exact hit on the half level
                timeHalf = this.xData[0][ii] - this.xData[0][0];
                test = false;
            }
            else{
                if(this.yData[ii]<maxYhalf && this.yData[ii+1]>maxYhalf){
                    // upward crossing between ii and ii+1: take the midpoint
                    timeHalf = (this.xData[0][ii] + this.xData[0][ii+1])/2.0 - this.xData[0][0];
                    test = false;
                }
                else{
                    if(this.yData[ii]>maxYhalf && this.yData[ii+1]<maxYhalf){
                        // downward crossing between ii and ii+1: take the midpoint
                        timeHalf = (this.xData[0][ii] + this.xData[0][ii+1])/2.0 - this.xData[0][0];
                        test = false;
                    }
                    else{
                        ii++;
                        if(ii>=this.nData-1)test = false;
                    }
                }
            }
        }

        // no crossing found (timeHalf still NaN; x!=x is the NaN test):
        // fall back to the maximum difference between adjacent y values
        if(timeHalf!=timeHalf){
            timeHalf = am.maximumDifference();
        }

        // initial estimates: amplitude = extreme y, rate from the half-rise time
        double guessB = -testDirection/timeHalf;
        double[] start = {maxY, guessB};
        double[] step = {Math.abs(start[0]/5.0), Math.abs(start[1]/5.0)};

        // Nelder and Mead Simplex Regression
        OneMinusExponentialFunction f = new OneMinusExponentialFunction();
        f.scaleOption = this.scaleFlag;
        f.scaleFactor = this.yScaleFactor;
        Object regFun2 = (Object)f;
        this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
        double ss0 = this.sumOfSquaresError;
        // NOTE(review): this stores a reference, not a copy - assumes
        // nelderMead allocates a fresh this.best array on each run rather than
        // mutating it in place; verify
        double[] bestEstimates0 = this.best;

        // Repeat with A and B guess of opposite sign
        start[0] = -maxY;
        start[1] = -guessB;
        step[0] = Math.abs(start[0]/5.0);
        step[1] = Math.abs(start[1]/5.0);
        this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

        // Choose better result: if the second run is worse, refine from the
        // first run's estimates with step sizes tightened to 5%
        if(this.sumOfSquaresError>ss0){
            start[0] = bestEstimates0[0];
            start[1] = bestEstimates0[1];
            step[0] = Math.abs(start[0]/20.0);
            step[1] = Math.abs(start[1]/20.0);
            this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
        }

        // Plotting
        if(plotFlag==1){
            // Print results
            if(!this.supressPrint)this.print();

            // Plot results
            int flag = this.plotXY(f);
            if(flag!=-2 && !this.supressYYplot)this.plotYY();
        }
    }
|
---|
8280 |
|
---|
8281 | // FIT TO AN EXPOPNENTIAL DISTRIBUTION
|
---|
8282 |
|
---|
8283 | public void exponential(){
|
---|
8284 | this.fitExponential(0, 0);
|
---|
8285 | }
|
---|
8286 |
|
---|
8287 | public void exponentialPlot(){
|
---|
8288 | this.fitExponential(1, 0);
|
---|
8289 | }
|
---|
8290 |
|
---|
8291 | public void exponentialOnePar(){
|
---|
8292 | this.fitExponential(0, 1);
|
---|
8293 | }
|
---|
8294 |
|
---|
8295 | public void exponentialOneParPlot(){
|
---|
8296 | this.fitExponential(1, 1);
|
---|
8297 | }
|
---|
8298 |
|
---|
8299 | public void exponentialStandard(){
|
---|
8300 | this.fitExponential(0, 2);
|
---|
8301 | }
|
---|
8302 |
|
---|
8303 | public void exponentialStandardPlot(){
|
---|
8304 | this.fitExponential(1, 2);
|
---|
8305 | }
|
---|
8306 |
|
---|
    // Fits the data to an exponential distribution by Nelder-Mead simplex
    // minimisation.
    // allTest: 1 = also print the results and plot the fit.
    // typeFlag: 0 = full form (mu, sigma, optional scale factor),
    //           1 = one-parameter form (sigma, optional scale factor),
    //           2 = standard form (optional scale factor only).
    // The y data (and weights) may be sign-reversed and/or magnitude-rescaled
    // for the fit; the originals are restored before returning.
    protected void fitExponential(int allTest, int typeFlag){
        if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
        this.userSupplied = false;
        // parameter count and method id depend on the chosen form
        switch(typeFlag){
            case 0: this.lastMethod=19;
                    this.nTerms=3;
                    break;
            case 1: this.lastMethod=20;
                    this.nTerms=2;
                    break;
            case 2: this.lastMethod=21;
                    this.nTerms=1;
                    break;
        }
        // a fixed ordinate scale factor removes one fitted parameter
        if(!this.scaleFlag)this.nTerms=this.nTerms-1;
        this.linNonLin = false;
        this.zeroCheck = false;
        this.degreesOfFreedom=this.nData - this.nTerms;
        if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
        if(this.nTerms==0){
            // standard form with a fixed scale factor: nothing left to fit
            this.noParameters("Exponential");
        }
        else{

            // Save x-y-w data so it can be restored after the fit
            double[] xx = new double[this.nData];
            double[] yy = new double[this.nData];
            double[] ww = new double[this.nData];

            for(int i=0; i<this.nData; i++){
                xx[i]=this.xData[0][i];
                yy[i]=this.yData[i];
                ww[i]=this.weight[i];
            }

            // order data into ascending order of the abscissae
            Regression.sort(this.xData[0], this.yData, this.weight);

            // check y data: dataSign slot 4 holds the value of largest
            // magnitude, slot 5 its index
            Double tempd=null;
            ArrayList<Object> retY = Regression.dataSign(yData);
            tempd = (Double)retY.get(4);
            double yPeak = tempd.doubleValue();
            Integer tempi = null;
            tempi = (Integer)retY.get(5);
            int peaki = tempi.intValue();

            // check sign of y data: fit is done on positive-going data
            String ss = "Exponential";
            boolean ySignFlag = false;
            if(yPeak<0.0D){
                this.reverseYsign(ss);
                retY = Regression.dataSign(this.yData);
                yPeak = -yPeak;
                ySignFlag = true;
            }

            // check y values for all very small values; if rescaled, the fit
            // proceeds with yPeak normalised to 1
            boolean magCheck=false;
            double magScale = this.checkYallSmall(yPeak, ss);
            if(magScale!=1.0D){
                magCheck=true;
                yPeak=1.0D;
            }

            // minimum value of x (dataSign slot 0)
            ArrayList<Object> retX = Regression.dataSign(this.xData[0]);
            tempd = (Double)retX.get(0);
            double xMin = tempd.doubleValue();

            // estimate of sigma: x distance from the first point to the point
            // whose y value is closest to yPeak/e
            double yE = yPeak/Math.exp(1.0D);
            if(this.yData[0]<yPeak)yE = (yPeak+yData[0])/(2.0D*Math.exp(1.0D));
            double yDiff = Math.abs(yData[0]-yE);
            double yTest = 0.0D;
            int iE = 0;
            for(int i=1; i<this.nData; i++){
                yTest=Math.abs(this.yData[i]-yE);
                if(yTest<yDiff){
                    yDiff=yTest;
                    iE=i;
                }
            }
            double sigma = this.xData[0][iE]-this.xData[0][0];

            // Nelder and Mead Simplex Regression
            double[] start = new double[this.nTerms];
            double[] step = new double[this.nTerms];

            // Fill arrays needed by the Simplex
            switch(typeFlag){
                case 0: start[0] = xMin*0.9;    //mu
                        start[1] = sigma;       //sigma
                        if(this.scaleFlag){
                            start[2] = yPeak*sigma;     //y axis scaling factor
                        }
                        step[0] = 0.1D*start[0];
                        // a zero step would freeze mu; fall back to 10% of the
                        // x maximum (or the x minimum if the maximum is zero)
                        if(step[0]==0.0D){
                            ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
                            Double tempdd = null;
                            tempdd = (Double)ret0.get(2);
                            double xmax = tempdd.doubleValue();
                            if(xmax==0.0D){
                                tempdd = (Double)ret0.get(0);
                                xmax = tempdd.doubleValue();
                            }
                            step[0]=xmax*0.1D;
                        }
                        step[1] = 0.1D*start[1];
                        if(this.scaleFlag)step[2] = 0.1D*start[2];
                        break;
                case 1: start[0] = sigma;   //sigma
                        if(this.scaleFlag){
                            start[1] = yPeak*sigma;     //y axis scaling factor
                        }
                        step[0] = 0.1D*start[0];
                        if(this.scaleFlag)step[1] = 0.1D*start[1];
                        break;
                case 2: if(this.scaleFlag){
                            start[0] = yPeak;   //y axis scaling factor
                            step[0] = 0.1D*start[0];
                        }
                        break;
            }

            // Create instance of Exponential function and perform regression
            ExponentialFunction ff = new ExponentialFunction();
            ff.typeFlag = typeFlag;
            ff.scaleOption = this.scaleFlag;
            ff.scaleFactor = this.yScaleFactor;
            Object regFun3 = (Object)ff;
            this.nelderMead(regFun3, start, step, this.fTol, this.nMax);

            if(allTest==1){
                // Print results
                if(!this.supressPrint)this.print();
                // Plot results
                int flag = this.plotXY(ff);
                if(flag!=-2 && !this.supressYYplot)this.plotYY();
            }

            // restore data
            // NOTE(review): restoration divides the saved values by magScale -
            // assumes checkYallSmall multiplied yData/weight by magScale in
            // place, leaving the saved yy/ww unscaled copies; verify
            if(magCheck){
                for(int i =0; i<this.nData; i++){
                    this.yData[i] = yy[i]/magScale;
                    if(this.weightOpt)this.weight[i] = ww[i]/magScale;
                }
            }
            if(ySignFlag){
                for(int i =0; i<this.nData; i++){
                    this.yData[i]=-this.yData[i];
                }
            }
        }
    }
|
---|
8462 |
|
---|
    // Check for zero and negative values in cumulative y data, repairing them
    // in place so that log-type transforms downstream do not fail.
    // xx, yy, ww are repaired in parallel. A non-positive yy[i] at or before
    // the scan frontier is replaced by the next positive entry; one past the
    // frontier is replaced by the average of its neighbours (or by its
    // predecessor at the final index).
    // Throws ArithmeticException when no positive y value exists at all.
    public void checkZeroNeg(double [] xx, double[] yy, double[] ww){
        // jj marks the furthest index already searched for a positive value
        int jj = 0;
        boolean test = true;
        for(int i=0; i<this.nData; i++){
            if(yy[i]<=0.0D){
                if(i<=jj){
                    // still inside the searched region: scan forward from i
                    // for the next positive y and copy that triplet back
                    test=true;
                    jj = i;
                    while(test){
                        jj++;
                        if(jj>=this.nData)throw new ArithmeticException("all zero cumulative data!!");
                        if(yy[jj]>0.0D){
                            yy[i]=yy[jj];
                            xx[i]=xx[jj];
                            ww[i]=ww[jj];
                            test=false;
                        }
                    }
                }
                else{
                    if(i==this.nData-1){
                        // last point: duplicate the previous triplet
                        yy[i]=yy[i-1];
                        xx[i]=xx[i-1];
                        ww[i]=ww[i-1];
                    }
                    else{
                        // interior point past the frontier: average the
                        // neighbours (note: yy[i-1] may itself have been
                        // repaired earlier in this pass)
                        yy[i]=(yy[i-1] + yy[i+1])/2.0D;
                        xx[i]=(xx[i-1] + xx[i+1])/2.0D;
                        ww[i]=(ww[i-1] + ww[i+1])/2.0D;
                    }
                }
            }
        }
    }
|
---|
8498 |
|
---|
8499 | public void rayleigh(){
|
---|
8500 | this.fitRayleigh(0, 0);
|
---|
8501 | }
|
---|
8502 |
|
---|
8503 | public void rayleighPlot(){
|
---|
8504 | this.fitRayleigh(1, 0);
|
---|
8505 | }
|
---|
8506 |
|
---|
8507 | protected void fitRayleigh(int allTest, int typeFlag){
|
---|
8508 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
8509 | this.lastMethod=22;
|
---|
8510 | this.userSupplied = false;
|
---|
8511 | this.nTerms=2;
|
---|
8512 | if(!this.scaleFlag)this.nTerms=this.nTerms-1;
|
---|
8513 | this.linNonLin = false;
|
---|
8514 | this.zeroCheck = false;
|
---|
8515 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
8516 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
8517 |
|
---|
8518 |
|
---|
8519 | // order data into ascending order of the abscissae
|
---|
8520 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
8521 |
|
---|
8522 | // check y data
|
---|
8523 | Double tempd=null;
|
---|
8524 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
8525 | tempd = (Double)retY.get(4);
|
---|
8526 | double yPeak = tempd.doubleValue();
|
---|
8527 | Integer tempi = null;
|
---|
8528 | tempi = (Integer)retY.get(5);
|
---|
8529 | int peaki = tempi.intValue();
|
---|
8530 |
|
---|
8531 | // check sign of y data
|
---|
8532 | String ss = "Rayleigh";
|
---|
8533 | boolean ySignFlag = false;
|
---|
8534 | if(yPeak<0.0D){
|
---|
8535 | this.reverseYsign(ss);
|
---|
8536 | retY = Regression.dataSign(this.yData);
|
---|
8537 | yPeak = -yPeak;
|
---|
8538 | ySignFlag = true;
|
---|
8539 | }
|
---|
8540 |
|
---|
8541 | // check y values for all very small values
|
---|
8542 | boolean magCheck=false;
|
---|
8543 | double magScale = this.checkYallSmall(yPeak, ss);
|
---|
8544 | if(magScale!=1.0D){
|
---|
8545 | magCheck=true;
|
---|
8546 | yPeak=1.0D;
|
---|
8547 | }
|
---|
8548 |
|
---|
8549 | // Save x-y-w data
|
---|
8550 | double[] xx = new double[this.nData];
|
---|
8551 | double[] yy = new double[this.nData];
|
---|
8552 | double[] ww = new double[this.nData];
|
---|
8553 |
|
---|
8554 | for(int i=0; i<this.nData; i++){
|
---|
8555 | xx[i]=this.xData[0][i];
|
---|
8556 | yy[i]=this.yData[i];
|
---|
8557 | ww[i]=this.weight[i];
|
---|
8558 | }
|
---|
8559 |
|
---|
8560 | // minimum value of x
|
---|
8561 | ArrayList<Object> retX = Regression.dataSign(this.xData[0]);
|
---|
8562 | tempd = (Double)retX.get(0);
|
---|
8563 | double xMin = tempd.doubleValue();
|
---|
8564 |
|
---|
8565 | // maximum value of x
|
---|
8566 | tempd = (Double)retX.get(2);
|
---|
8567 | double xMax = tempd.doubleValue();
|
---|
8568 |
|
---|
8569 | // Calculate x value at peak y (estimate of the 'distribution mode')
|
---|
8570 | double distribMode = xData[0][peaki];
|
---|
8571 |
|
---|
8572 | // Calculate an estimate of the half-height width
|
---|
8573 | double sd = Math.log(2.0D)*halfWidth(xData[0], yData);
|
---|
8574 |
|
---|
8575 | // Calculate the cumulative probability and return ordinate scaling factor estimate
|
---|
8576 | double[] cumX = new double[this.nData];
|
---|
8577 | double[] cumY = new double[this.nData];
|
---|
8578 | double[] cumW = new double[this.nData];
|
---|
8579 | ErrorProp[] cumYe = ErrorProp.oneDarray(this.nData);
|
---|
8580 | double yScale = this.calculateCumulativeValues(cumX, cumY, cumW, cumYe, peaki, yPeak, distribMode, ss);
|
---|
8581 |
|
---|
8582 | //Calculate log transform
|
---|
8583 | for(int i=0; i<this.nData; i++){
|
---|
8584 | cumYe[i] = ErrorProp.minus(1.0D,cumYe[i]);
|
---|
8585 | cumYe[i] = ErrorProp.over(1.0D, cumYe[i]);
|
---|
8586 | cumYe[i] = ErrorProp.log(cumYe[i]);
|
---|
8587 | cumY[i] = cumYe[i].getValue();
|
---|
8588 | cumW[i] = cumYe[i].getError();
|
---|
8589 | }
|
---|
8590 |
|
---|
8591 | // Fill data arrays with transformed data
|
---|
8592 | for(int i =0; i<this.nData; i++){
|
---|
8593 | xData[0][i] = cumX[i];
|
---|
8594 | yData[i] = cumY[i];
|
---|
8595 | weight[i] = cumW[i];
|
---|
8596 | }
|
---|
8597 | boolean weightOptHold = this.weightOpt;
|
---|
8598 | this.weightOpt=true;
|
---|
8599 |
|
---|
8600 | // Nelder and Mead Simplex Regression for semi-linearised Rayleigh
|
---|
8601 | // disable statistical analysis
|
---|
8602 | this.statFlag=false;
|
---|
8603 |
|
---|
8604 | // Fill arrays needed by the Simplex
|
---|
8605 | double[] start = new double[this.nTerms];
|
---|
8606 | double[] step = new double[this.nTerms];
|
---|
8607 | for(int i=0; i<this.nTerms; i++){
|
---|
8608 | start[i]=1.0D;
|
---|
8609 | step[i]=0.2D;
|
---|
8610 | }
|
---|
8611 | start[0] = sd; //sigma
|
---|
8612 | step[0] = 0.2D;
|
---|
8613 | this.addConstraint(0,-1,0.0D);
|
---|
8614 |
|
---|
8615 | // Create instance of log function and perform regression
|
---|
8616 | RayleighFunctionTwo f = new RayleighFunctionTwo();
|
---|
8617 | Object regFun2 = (Object)f;
|
---|
8618 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
8619 |
|
---|
8620 | // Get best estimates of log regression
|
---|
8621 | double[] ests = Conv.copy(this.best);
|
---|
8622 |
|
---|
8623 | // enable statistical analysis
|
---|
8624 | this.statFlag=true;
|
---|
8625 |
|
---|
8626 | // restore data reversing the loglog transform but maintaining any sign reversals
|
---|
8627 | this.weightOpt=weightOptHold;
|
---|
8628 | for(int i =0; i<this.nData; i++){
|
---|
8629 | xData[0][i] = xx[i];
|
---|
8630 | yData[i] = yy[i];
|
---|
8631 | weight[i] = ww[i];
|
---|
8632 | }
|
---|
8633 |
|
---|
8634 | // Fill arrays needed by the Simplex
|
---|
8635 | start[0] = ests[0]; //sigma
|
---|
8636 | if(this.scaleFlag){
|
---|
8637 | start[1] = 1.0/yScale; //y axis scaling factor
|
---|
8638 | }
|
---|
8639 | step[0] = 0.1D*start[0];
|
---|
8640 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
8641 |
|
---|
8642 |
|
---|
8643 | // Create instance of Rayleigh function and perform regression
|
---|
8644 | RayleighFunctionOne ff = new RayleighFunctionOne();
|
---|
8645 | ff.scaleOption = this.scaleFlag;
|
---|
8646 | ff.scaleFactor = this.yScaleFactor;
|
---|
8647 | Object regFun3 = (Object)ff;
|
---|
8648 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
8649 |
|
---|
8650 | if(allTest==1){
|
---|
8651 | // Print results
|
---|
8652 | if(!this.supressPrint)this.print();
|
---|
8653 | // Plot results
|
---|
8654 | int flag = this.plotXY(ff);
|
---|
8655 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
8656 | }
|
---|
8657 |
|
---|
8658 | // restore data
|
---|
8659 | if(magCheck){
|
---|
8660 | for(int i =0; i<this.nData; i++){
|
---|
8661 | this.yData[i] = yy[i]/magScale;
|
---|
8662 | if(this.weightOpt)this.weight[i] = ww[i]/magScale;
|
---|
8663 | }
|
---|
8664 | }
|
---|
8665 | if(ySignFlag){
|
---|
8666 | for(int i =0; i<this.nData; i++){
|
---|
8667 | this.yData[i]=-this.yData[i];
|
---|
8668 | }
|
---|
8669 | }
|
---|
8670 | }
|
---|
8671 |
|
---|
8672 | // Shifted Pareto
|
---|
8673 | public void paretoShifted(){
|
---|
8674 | this.fitPareto(0, 3);
|
---|
8675 | }
|
---|
8676 |
|
---|
8677 | public void paretoThreePar(){
|
---|
8678 | this.fitPareto(0, 3);
|
---|
8679 | }
|
---|
8680 |
|
---|
8681 | public void paretoShiftedPlot(){
|
---|
8682 | this.fitPareto(1, 3);
|
---|
8683 | }
|
---|
8684 | public void paretoThreeParPlot(){
|
---|
8685 | this.fitPareto(1, 3);
|
---|
8686 | }
|
---|
8687 |
|
---|
8688 | // Two Parameter Pareto
|
---|
8689 | public void paretoTwoPar(){
|
---|
8690 | this.fitPareto(0, 2);
|
---|
8691 | }
|
---|
8692 | // Deprecated
|
---|
8693 | public void pareto(){
|
---|
8694 | this.fitPareto(0, 2);
|
---|
8695 | }
|
---|
8696 |
|
---|
8697 | public void paretoTwoParPlot(){
|
---|
8698 | this.fitPareto(1, 2);
|
---|
8699 | }
|
---|
8700 | // Deprecated
|
---|
8701 | public void paretoPlot(){
|
---|
8702 | this.fitPareto(1, 2);
|
---|
8703 | }
|
---|
8704 |
|
---|
8705 | // One Parameter Pareto
|
---|
8706 | public void paretoOnePar(){
|
---|
8707 | this.fitPareto(0, 1);
|
---|
8708 | }
|
---|
8709 |
|
---|
8710 | public void paretoOneParPlot(){
|
---|
8711 | this.fitPareto(1, 1);
|
---|
8712 | }
|
---|
8713 |
|
---|
// Method for fitting data to a Pareto distribution.
// allTest: 1 => print and plot the results, 0 => silent fit.
// typeFlag: 3 => shifted (three-parameter), 2 => two-parameter, 1 => one-parameter Pareto.
// Strategy: (1) transform the data to an estimated cumulative distribution and fit that
// with a simplex run (statistics disabled) to get starting values; (2) restore the raw
// data and run the simplex again on the Pareto pdf itself with statistics enabled.
// NOTE: temporarily mutates this.xData/this.yData/this.weight and several option flags;
// they are restored before return (sign reversal and magnitude scaling are also undone).
protected void fitPareto(int allTest, int typeFlag){
    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.userSupplied = false;
    // select method id and number of fitted parameters for the chosen Pareto variant
    switch(typeFlag){
        case 3: this.lastMethod=29;
                this.nTerms=4;
                break;
        case 2: this.lastMethod=23;
                this.nTerms=3;
                break;
        case 1: this.lastMethod=24;
                this.nTerms=2;
                break;
    }

    // drop the ordinate scaling factor parameter if scaling is disabled
    if(!this.scaleFlag)this.nTerms=this.nTerms-1;
    this.linNonLin = false;
    this.zeroCheck = false;
    this.degreesOfFreedom=this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
    String ss = "Pareto";

    // order data into ascending order of the abscissae
    Regression.sort(this.xData[0], this.yData, this.weight);

    // check y data: dataSign returns (min, minIndex, max, maxIndex, peak, peakIndex, ...)
    Double tempd=null;
    ArrayList<Object> retY = Regression.dataSign(yData);
    tempd = (Double)retY.get(4);
    double yPeak = tempd.doubleValue();
    Integer tempi = null;
    tempi = (Integer)retY.get(5);
    int peaki = tempi.intValue();

    // check for infinity; if infinities were removed, re-derive the peak
    if(this.infinityCheck(yPeak, peaki)){
        retY = Regression.dataSign(yData);
        tempd = (Double)retY.get(4);
        yPeak = tempd.doubleValue();
        tempi = null;
        tempi = (Integer)retY.get(5);
        peaki = tempi.intValue();
    }

    // check sign of y data: fit against -y if the peak is negative, undo on exit
    boolean ySignFlag = false;
    if(yPeak<0.0D){
        this.reverseYsign(ss);
        retY = Regression.dataSign(this.yData);
        yPeak = -yPeak;
        ySignFlag = true;
    }

    // check y values for all very small values; rescale to avoid underflow, undo on exit
    boolean magCheck=false;
    double magScale = this.checkYallSmall(yPeak, ss);
    if(magScale!=1.0D){
        magCheck=true;
        yPeak=1.0D;
    }

    // minimum value of x
    ArrayList<Object> retX = Regression.dataSign(this.xData[0]);
    tempd = (Double)retX.get(0);
    double xMin = tempd.doubleValue();

    // maximum value of x
    tempd = (Double)retX.get(2);
    double xMax = tempd.doubleValue();

    // Calculate x value at peak y (estimate of the 'distribution mode')
    double distribMode = xData[0][peaki];

    // Calculate an estimate of the half-height width
    double sd = Math.log(2.0D)*halfWidth(xData[0], yData);

    // Save x-y-w data so the instance arrays can be restored after the cdf fit
    double[] xx = new double[this.nData];
    double[] yy = new double[this.nData];
    double[] ww = new double[this.nData];

    for(int i=0; i<this.nData; i++){
        xx[i]=this.xData[0][i];
        yy[i]=this.yData[i];
        ww[i]=this.weight[i];
    }

    // Calculate the cumulative probability and return ordinate scaling factor estimate
    double[] cumX = new double[this.nData];
    double[] cumY = new double[this.nData];
    double[] cumW = new double[this.nData];
    ErrorProp[] cumYe = ErrorProp.oneDarray(this.nData);
    double yScale = this.calculateCumulativeValues(cumX, cumY, cumW, cumYe, peaki, yPeak, distribMode, ss);

    // Calculate 1 - cumulative probability (the Pareto survival function),
    // propagating the errors into the weights
    for(int i=0; i<this.nData; i++){
        cumYe[i] = ErrorProp.minus(1.0D,cumYe[i]);
        cumY[i] = cumYe[i].getValue();
        cumW[i] = cumYe[i].getError();
    }

    // Fill data arrays with transformed data
    for(int i =0; i<this.nData; i++){
        xData[0][i] = cumX[i];
        yData[i] = cumY[i];
        weight[i] = cumW[i];
    }
    // propagated errors serve as weights for the cdf fit; restore the user's choice later
    boolean weightOptHold = this.weightOpt;
    this.weightOpt=true;

    // Nelder and Mead Simplex Regression for Pareto estimated cdf
    // disable statistical analysis (this run only provides starting values)
    this.statFlag=false;

    // Fill arrays needed by the Simplex
    double[] start = new double[this.nTerms];
    double[] step = new double[this.nTerms];
    for(int i=0; i<this.nTerms; i++){
        start[i]=1.0D;
        step[i]=0.2D;
    }
    // initial estimates and constraints for the cdf fit, per Pareto variant
    switch(typeFlag){
        case 3: start[0] = 2;                //alpha (shape)
                start[1] = xMin*0.9D;        //beta (scale)
                if(xMin<0){                  //theta (shift)
                    start[2] = -xMin*1.1D;
                }
                else{
                    start[2] = xMin*0.01;
                }
                if(start[1]<0.0D)start[1]=0.0D;
                step[0] = 0.2D*start[0];
                step[1] = 0.2D*start[1];
                // guard against a zero step for beta
                if(step[1]==0.0D){
                    double xmax = xMax;
                    if(xmax==0.0D){
                        xmax = xMin;
                    }
                    step[1]=xmax*0.1D;
                }
                this.addConstraint(0,-1,0.0D);      // alpha > 0
                this.addConstraint(1,-1,0.0D);      // beta > 0
                this.addConstraint(1,+1,xMin);      // beta <= xMin
                break;
        case 2: if(xMin<0)System.out.println("Method: FitParetoTwoPar/FitParetoTwoParPlot\nNegative data values present\nFitParetoShifted/FitParetoShiftedPlot would have been more appropriate");
                start[0] = 2;                //alpha (shape)
                start[1] = xMin*0.9D;        //beta (scale)
                if(start[1]<0.0D)start[1]=0.0D;
                step[0] = 0.2D*start[0];
                step[1] = 0.2D*start[1];
                // guard against a zero step for beta
                if(step[1]==0.0D){
                    double xmax = xMax;
                    if(xmax==0.0D){
                        xmax = xMin;
                    }
                    step[1]=xmax*0.1D;
                }
                this.addConstraint(0,-1,0.0D);      // alpha > 0
                this.addConstraint(1,-1,0.0D);      // beta > 0
                break;
        case 1: if(xMin<0)System.out.println("Method: FitParetoOnePar/FitParetoOneParPlot\nNegative data values present\nFitParetoShifted/FitParetoShiftedPlot would have been more appropriate");
                start[0] = 2;                //alpha (shape)
                step[0] = 0.2D*start[0];
                this.addConstraint(0,-1,0.0D);      // alpha > 0
                // NOTE(review): with typeFlag==1 parameter 1 is the ordinate scaling
                // factor (or absent when scaleFlag is false) — confirm this second
                // constraint is intended and in range for nTerms
                this.addConstraint(1,-1,0.0D);
                break;
    }

    // Create instance of cdf function and perform regression
    ParetoFunctionTwo f = new ParetoFunctionTwo();
    f.typeFlag = typeFlag;
    Object regFun2 = (Object)f;
    this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

    // Get best estimates of cdf regression
    double[] ests = Conv.copy(this.best);

    // Nelder and Mead Simplex Regression for Pareto
    // using best estimates from cdf regression as initial estimates

    // enable statistical analysis
    this.statFlag=true;

    // restore data reversing the cdf transform but maintaining any sign reversals
    this.weightOpt=weightOptHold;
    for(int i =0; i<this.nData; i++){
        xData[0][i] = xx[i];
        yData[i] = yy[i];
        weight[i] = ww[i];
    }

    // Fill arrays needed by the Simplex; sanitise non-positive cdf estimates so the
    // second fit starts from a valid point
    switch(typeFlag){
        case 3: start[0] = ests[0];          //alpha
                if(start[0]<=0.0D){
                    if(start[0]==0.0D){
                        start[0]=1.0D;
                    }
                    else{
                        start[0] = Math.min(1.0D,-start[0]);
                    }
                }
                start[1] = ests[1];          //beta
                if(start[1]<=0.0D){
                    if(start[1]==0.0D){
                        start[1]=1.0D;
                    }
                    else{
                        start[1] = Math.min(1.0D,-start[1]);
                    }
                }
                start[2] = ests[2];          //theta
                if(this.scaleFlag){
                    start[3] = 1.0/yScale;   //y axis scaling factor
                }
                step[0] = 0.1D*start[0];
                step[1] = 0.1D*start[1];
                if(step[1]==0.0D){
                    double xmax = xMax;
                    if(xmax==0.0D){
                        xmax = xMin;
                    }
                    step[1]=xmax*0.1D;
                }
                // NOTE(review): in this 4-parameter case the scaling factor is start[3];
                // step[3] is never reset while step[2] is overwritten here — compare with
                // the case 2 branch below and confirm this should not be
                // step[3] = 0.1D*start[3]
                if(this.scaleFlag)step[2] = 0.1D*start[2];
                break;
        case 2: start[0] = ests[0];          //alpha
                if(start[0]<=0.0D){
                    if(start[0]==0.0D){
                        start[0]=1.0D;
                    }
                    else{
                        start[0] = Math.min(1.0D,-start[0]);
                    }
                }
                start[1] = ests[1];          //beta
                if(start[1]<=0.0D){
                    if(start[1]==0.0D){
                        start[1]=1.0D;
                    }
                    else{
                        start[1] = Math.min(1.0D,-start[1]);
                    }
                }
                if(this.scaleFlag){
                    start[2] = 1.0/yScale;   //y axis scaling factor
                }
                step[0] = 0.1D*start[0];
                step[1] = 0.1D*start[1];
                if(step[1]==0.0D){
                    double xmax = xMax;
                    if(xmax==0.0D){
                        xmax = xMin;
                    }
                    step[1]=xmax*0.1D;
                }
                if(this.scaleFlag)step[2] = 0.1D*start[2];
                break;
        case 1: start[0] = ests[0];          //alpha
                if(start[0]<=0.0D){
                    if(start[0]==0.0D){
                        start[0]=1.0D;
                    }
                    else{
                        start[0] = Math.min(1.0D,-start[0]);
                    }
                }
                if(this.scaleFlag){
                    start[1] = 1.0/yScale;   //y axis scaling factor
                }
                step[0] = 0.1D*start[0];
                if(this.scaleFlag)step[1] = 0.1D*start[1];
                break;
    }

    // Create instance of Pareto function and perform regression
    ParetoFunctionOne ff = new ParetoFunctionOne();
    ff.typeFlag = typeFlag;
    ff.scaleOption = this.scaleFlag;
    ff.scaleFactor = this.yScaleFactor;
    Object regFun3 = (Object)ff;
    this.nelderMead(regFun3, start, step, this.fTol, this.nMax);

    if(allTest==1){
        // Print results
        if(!this.supressPrint)this.print();
        // Plot results
        int flag = this.plotXY(ff);
        if(flag!=-2 && !this.supressYYplot)this.plotYY();
    }

    // restore data: undo the small-magnitude rescale and any sign reversal
    this.weightOpt = weightOptHold;
    if(magCheck){
        for(int i =0; i<this.nData; i++){
            this.yData[i] = yy[i]/magScale;
            if(this.weightOpt)this.weight[i] = ww[i]/magScale;
        }
    }
    if(ySignFlag){
        for(int i =0; i<this.nData; i++){
            this.yData[i]=-this.yData[i];
        }
    }
}
|
---|
9020 |
|
---|
9021 |
|
---|
9022 | // method for fitting data to a sigmoid threshold function
|
---|
9023 | public void sigmoidThreshold(){
|
---|
9024 | fitSigmoidThreshold(0);
|
---|
9025 | }
|
---|
9026 |
|
---|
9027 | // method for fitting data to a sigmoid threshold function with plot and print out
|
---|
9028 | public void sigmoidThresholdPlot(){
|
---|
9029 | fitSigmoidThreshold(1);
|
---|
9030 | }
|
---|
9031 |
|
---|
9032 |
|
---|
// Method for fitting data to a sigmoid threshold function.
// plotFlag: 1 => print and plot the results, 0 => silent fit.
// Parameters fitted: [0] slope term, [1] threshold theta, [2] ordinate scale (if scaleFlag).
// Initial estimates are derived by scanning for the half-height crossings of the data.
protected void fitSigmoidThreshold(int plotFlag){

    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.lastMethod=25;
    this.userSupplied = false;
    this.linNonLin = false;
    this.zeroCheck = false;
    this.nTerms=3;
    if(!this.scaleFlag)this.nTerms=2;
    this.degreesOfFreedom=this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

    // order data into ascending order of the abscissae
    Regression.sort(this.xData[0], this.yData, this.weight);

    // Estimate of theta: locate the x positions where y first/last crosses half-range
    double yymin = Fmath.minimum(this.yData);
    double yymax = Fmath.maximum(this.yData);
    int dirFlag = 1;
    if(yymin<0)dirFlag=-1;
    double yyymid = (yymax - yymin)/2.0D;
    double yyxmidl = xData[0][0];
    int ii = 1;
    int nLen = this.yData.length;
    boolean test = true;
    // scan upward for the first y >= half-range; fall back to the mean x if none found
    while(test){
        if(this.yData[ii]>=dirFlag*yyymid){
            yyxmidl = xData[0][ii];
            test = false;
        }
        else{
            ii++;
            if(ii>=nLen){
                yyxmidl = Stat.mean(this.xData[0]);
                ii=nLen-1;
                test = false;
            }
        }
    }
    // scan downward for the last y <= half-range; same fallback
    double yyxmidh = xData[0][nLen-1];
    int jj = nLen-1;
    test = true;
    while(test){
        if(this.yData[jj]<=dirFlag*yyymid){
            yyxmidh = xData[0][jj];
            test = false;
        }
        else{
            jj--;
            if(jj<0){
                yyxmidh = Stat.mean(this.xData[0]);
                jj=1;
                test = false;
            }
        }
    }
    // midpoint of the two crossing indices gives the threshold estimate
    int thetaPos = (ii+jj)/2;
    double theta0 = xData[0][thetaPos];

    // estimate of slope
    // NOTE(review): theta0 is an abscissa (x) value yet it is subtracted from
    // yData[nLen-1] here and used as a numerator below — compare with fitEC50,
    // which uses the midpoint y value in these expressions; confirm intended
    double thetaSlope1 = 2.0D*(yData[nLen-1] - theta0)/(xData[0][nLen-1] - xData[0][thetaPos]);
    double thetaSlope2 = 2.0D*theta0/(xData[0][thetaPos] - xData[0][nLen-1]);
    double thetaSlope = Math.max(thetaSlope1, thetaSlope2);

    // initial estimates
    double[] start = new double[nTerms];
    start[0] = 4.0D*thetaSlope;
    // normalise the slope estimate by the dominant y extreme
    if(dirFlag==1){
        start[0] /= yymax;
    }
    else{
        start[0] /= yymin;
    }
    start[1] = theta0;
    if(this.scaleFlag){
        if(dirFlag==1){
            start[2] = yymax;
        }
        else{
            start[2] = yymin;
        }
    }

    // initial step sizes (10% of each start; fallbacks guard against zero steps)
    double[] step = new double[nTerms];
    for(int i=0; i<nTerms; i++)step[i] = 0.1*start[i];
    if(step[0]==0.0D)step[0] = 0.1*(xData[0][nLen-1] - xData[0][0])/(yData[nLen-1] - yData[0]);
    if(step[1]==0.0D)step[1] = (xData[0][nLen-1] - xData[0][0])/20.0D;
    if(this.scaleFlag)if(step[2]==0.0D)step[2] = 0.1*(yData[nLen-1] - yData[0]);

    // Nelder and Mead Simplex Regression
    SigmoidThresholdFunction f = new SigmoidThresholdFunction();
    f.scaleOption = this.scaleFlag;
    f.scaleFactor = this.yScaleFactor;
    Object regFun2 = (Object)f;
    this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

    if(plotFlag==1){
        // Print results
        if(!this.supressPrint)this.print();

        // Plot results
        int flag = this.plotXY(f);
        if(flag!=-2 && !this.supressYYplot)this.plotYY();
    }
}
|
---|
9140 | // method for fitting data to a Hill/Sips Sigmoid
|
---|
9141 | public void sigmoidHillSips(){
|
---|
9142 | fitsigmoidHillSips(0);
|
---|
9143 | }
|
---|
9144 |
|
---|
9145 | // method for fitting data to a Hill/Sips Sigmoid with plot and print out
|
---|
9146 | public void sigmoidHillSipsPlot(){
|
---|
9147 | fitsigmoidHillSips(1);
|
---|
9148 | }
|
---|
9149 |
|
---|
// Method for fitting data to a Hill/Sips sigmoid.
// plotFlag: 1 => print and plot the results, 0 => silent fit.
// Parameters fitted: [0] theta (half-saturation term), [1] sigmoid power (sign follows
// the data direction), [2] ordinate scale (if scaleFlag).
// Relies on midPoint() to populate this.top, this.bottom, this.directionFlag,
// this.midPointIndex, this.midPointXvalue and this.midPointYvalue.
protected void fitsigmoidHillSips(int plotFlag){

    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.lastMethod=28;
    this.userSupplied = false;
    this.linNonLin = false;
    this.zeroCheck = false;
    this.nTerms=3;
    if(!this.scaleFlag)this.nTerms=2;
    this.degreesOfFreedom=this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

    // order data into ascending order of the abscissae
    Regression.sort(this.xData[0], this.yData, this.weight);

    // Estimate of theta via the sigmoid midpoint
    int nLen = this.yData.length;
    this.midPoint();
    // NOTE(review): theta for a Hill/Sips sigmoid is an abscissa quantity, but the
    // midpoint ORDINATE is used here — confirm this should not be this.midPointXvalue
    double theta0 = this.midPointYvalue;

    // initial estimates
    double[] start = new double[nTerms];
    start[0] = theta0;
    // power term starts at +/-1 depending on whether y rises or falls with x
    if(this.directionFlag==1){
        start[1] = 1;
    }
    else{
        start[1] = -1;
    }
    if(this.scaleFlag){
        start[2] = this.top - this.bottom;
    }

    // initial step sizes (10% of each start; fallbacks guard against zero steps)
    double[] step = new double[nTerms];
    for(int i=0; i<this.nTerms; i++)step[i] = 0.1*start[i];
    if(step[0]==0.0D)step[0] = (this.xData[0][nLen-1] - this.xData[0][0])/20.0D;
    if(this.scaleFlag)if(step[2]==0.0D)step[2] = 0.1*(this.yData[nLen-1] - this.yData[0]);

    // Nelder and Mead Simplex Regression
    SigmoidHillSipsFunction f = new SigmoidHillSipsFunction();
    f.scaleOption = this.scaleFlag;
    f.scaleFactor = this.yScaleFactor;
    Object regFun2 = (Object)f;
    this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

    if(plotFlag==1){
        // Print results
        if(!this.supressPrint)this.print();

        // Plot results
        int flag = this.plotXY(f);
        if(flag!=-2 && !this.supressYYplot)this.plotYY();
    }
}
|
---|
9206 |
|
---|
9207 | // method for fitting data to a EC50 dose response curve
|
---|
9208 | public void ec50(){
|
---|
9209 | fitEC50(0);
|
---|
9210 | }
|
---|
9211 |
|
---|
9212 | // method for fitting data to a EC50 dose response curve with plot and print out
|
---|
9213 | public void ec50Plot(){
|
---|
9214 | fitEC50(1);
|
---|
9215 | }
|
---|
9216 |
|
---|
9217 | // method for fitting data to a EC50 dose response curve
|
---|
9218 | // bottom constrained to zero or positive values
|
---|
9219 | public void ec50constrained(){
|
---|
9220 | fitEC50(2);
|
---|
9221 | }
|
---|
9222 |
|
---|
9223 | // method for fitting data to a EC50 dose response curve with plot and print out
|
---|
9224 | // bottom constrained to zero or positive values
|
---|
9225 | public void ec50constrainedPlot(){
|
---|
9226 | fitEC50(3);
|
---|
9227 | }
|
---|
9228 |
|
---|
9229 |
|
---|
9230 |
|
---|
// Estimate the mid point of sigmoid curves.
// Side effects: sets this.bottom/this.top (y extremes), this.bottomIndex/this.topIndex
// (their positions), this.directionFlag (+1 rising, -1 falling),
// this.midPointIndex, this.midPointXvalue and this.midPointYvalue.
// Assumes this.xData/this.yData are already sorted in ascending x order.
private void midPoint(){
    // Estimate of bottom and top
    this.bottom = Fmath.minimum(this.yData);
    this.top = Fmath.maximum(this.yData);
    this.bottomIndex = 0;
    this.topIndex = 0;
    int nLen = this.yData.length;
    int ii = 0;
    boolean test = true;
    // locate the index of the minimum (exact double comparison is safe here:
    // bottom was taken from the same array)
    while(test){
        if(this.bottom==this.yData[ii]){
            this.bottomIndex = ii;
            test = false;
        }
        else{
            ii++;
            if(ii>=nLen)throw new IllegalArgumentException("This should not be possible - check coding");
        }
    }
    // locate the index of the maximum
    test = true;
    ii = 0;
    while(test){
        if(this.top==this.yData[ii]){
            this.topIndex = ii;
            test = false;
        }
        else{
            ii++;
            if(ii>=nLen)throw new IllegalArgumentException("This should not be possible - check coding");
        }
    }
    // direction: +1 if y rises with x (max after min), -1 if it falls
    this.directionFlag = 1;
    if(this.topIndex<this.bottomIndex)this.directionFlag = -1;

    // Estimate of midpoint
    // NOTE(review): yyymid is the half-RANGE, not bottom + half-range, yet it is
    // compared directly against raw y values below — confirm this is the intended
    // threshold (it differs when bottom != 0)
    double yyymid = (this.top - this.bottom)/2.0D;
    double yyxmidl = this.xData[0][0];
    ii = 0;
    this.midPointIndex = 0;
    if(this.directionFlag==1){
        // rising curve: scan up for the first y >= threshold...
        test = true;
        while(test){
            if(this.yData[ii]>=yyymid){
                yyxmidl = this.xData[0][ii];
                test = false;
            }
            else{
                ii++;
                // fallback: no crossing found, use the mean x position
                if(ii>=nLen){
                    yyxmidl = Stat.mean(this.xData[0]);
                    ii=nLen-1;
                    test = false;
                }
            }
        }
        // ...and down for the last y <= threshold
        double yyxmidh = this.xData[0][nLen-1];
        int jj = nLen-1;
        test = true;
        while(test){
            if(this.yData[jj]<=yyymid){
                yyxmidh = this.xData[0][jj];
                test = false;
            }
            else{
                jj--;
                if(jj<0){
                    yyxmidh = Stat.mean(this.xData[0]);
                    jj=1;
                    test = false;
                }
            }
        }
        // midpoint index is the average of the two crossing indices
        this.midPointIndex = (ii+jj)/2;
    }
    else{
        // falling curve: mirror of the scans above with reversed comparisons
        ii = 0;
        test = true;
        while(test){
            if(this.yData[ii]<=yyymid){
                yyxmidl = this.xData[0][ii];
                test = false;
            }
            else{
                ii++;
                if(ii>=nLen){
                    yyxmidl = Stat.mean(this.xData[0]);
                    ii=nLen-1;
                    test = false;
                }
            }
        }
        double yyxmidh = this.xData[0][nLen-1];
        int jj = nLen-1;
        test = true;
        while(test){
            if(this.yData[jj]>=yyymid){
                yyxmidh = this.xData[0][jj];
                test = false;
            }
            else{
                jj--;
                if(jj<0){
                    yyxmidh = Stat.mean(this.xData[0]);
                    jj=1;
                    test = false;
                }
            }
        }
        this.midPointIndex = (ii+jj)/2;
    }
    this.midPointXvalue = this.xData[0][this.midPointIndex];
    this.midPointYvalue = this.yData[this.midPointIndex];
}
|
---|
9345 |
|
---|
// Method for fitting data to a logEC50 dose-response curve.
// cpFlag: 0 => plain fit, 1 => fit with print/plot, 2 => bottom-constrained fit,
// 3 => bottom-constrained fit with print/plot.
// Parameters fitted: [0] bottom, [1] top, [2] EC50, [3] Hill slope.
// Relies on midPoint() to populate this.top, this.bottom, this.directionFlag,
// this.midPointIndex and this.midPointYvalue.
protected void fitEC50(int cpFlag){

    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    int plotFlag = 0;
    boolean constrained = false;
    this.userSupplied = false;
    // decode the combined constraint/plot flag
    switch(cpFlag){
        case 0: this.lastMethod= 39;
                plotFlag = 0;
                break;
        case 1: this.lastMethod= 39;
                plotFlag = 1;
                break;
        case 2: this.lastMethod= 41;
                plotFlag = 0;
                constrained = true;
                break;
        case 3: this.lastMethod= 41;
                plotFlag = 1;
                constrained = true;
                break;
    }

    this.linNonLin = false;
    this.zeroCheck = false;
    this.nTerms=4;
    // NOTE: the ordinate scaling option is silently forced off for this fit;
    // any user-set scaleFlag is overwritten and not restored here
    this.scaleFlag = false;
    this.degreesOfFreedom=this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

    // order data into ascending order of the abscissae
    Regression.sort(this.xData[0], this.yData, this.weight);

    // Initial estimate of EC50 via the sigmoid midpoint
    int nLen = this.yData.length;
    this.midPoint();

    // estimate of slope from the two halves of the curve
    double thetaSlope1 = 2.0D*(this.yData[nLen-1] - this.midPointYvalue)/(this.xData[0][nLen-1] - this.xData[0][this.midPointIndex]);
    double thetaSlope2 = 2.0D*(this.midPointYvalue - this.yData[0])/(this.xData[0][this.midPointIndex] - this.xData[0][nLen-1]);
    double hillSlope = Math.max(thetaSlope1, thetaSlope2);

    // initial estimates
    double[] start = new double[nTerms];
    start[0] = this.bottom;
    start[1] = this.top;
    // NOTE(review): EC50 is an abscissa quantity but the midpoint ORDINATE is used
    // here — confirm this should not be this.midPointXvalue
    start[2] = this.midPointYvalue;
    start[3] = Math.abs(hillSlope);
    // rising curves take a negative Hill slope in this parameterisation
    if(this.directionFlag==1)start[3] = -Math.abs(hillSlope);

    // initial step sizes (10% of each start; fallbacks guard against zero steps)
    double[] step = new double[nTerms];
    for(int i=0; i<nTerms; i++)step[i] = 0.1*start[i];
    if(step[0]==0.0D)step[0] = 0.1*(yData[nLen-1] - yData[0]);
    if(step[1]==0.0D)step[1] = 0.1*(yData[nLen-1] - yData[0]) + yData[nLen-1];
    if(step[2]==0.0D)step[2] = 0.05*(xData[0][nLen-1] - xData[0][0]);
    if(step[3]==0.0D)step[3] = 0.1*(xData[0][nLen-1] - xData[0][0])/(yData[nLen-1] - yData[0]);

    // Constrained option: bottom (parameter 0) >= 0
    if(constrained)this.addConstraint(0, -1, 0.0D);

    // Nelder and Mead Simplex Regression
    EC50Function f = new EC50Function();
    Object regFun2 = (Object)f;
    this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

    if(plotFlag==1){
        // Print results
        if(!this.supressPrint)this.print();

        // Plot results
        int flag = this.plotXY(f);
        if(flag!=-2 && !this.supressYYplot)this.plotYY();
    }
}
|
---|
9422 |
|
---|
9423 | // method for fitting data to a rectangular hyberbola
|
---|
9424 | public void rectangularHyperbola(){
|
---|
9425 | fitRectangularHyperbola(0);
|
---|
9426 | }
|
---|
9427 |
|
---|
9428 | // method for fitting data to a rectangular hyberbola with plot and print out
|
---|
9429 | public void rectangularHyperbolaPlot(){
|
---|
9430 | fitRectangularHyperbola(1);
|
---|
9431 | }
|
---|
9432 |
|
---|
9433 | // method for fitting data to a rectangular hyperbola
|
---|
// Fits the data to a rectangular hyperbola by Nelder-Mead simplex minimisation.
// plotFlag: 1 = print and plot the results after fitting, any other value = fit only.
// Sorts the data, derives initial parameter estimates from the data range and
// half-height crossing points, then delegates to nelderMead().
protected void fitRectangularHyperbola(int plotFlag){

    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.lastMethod=26;                         // method identifier used by print()/plot routines
    this.userSupplied = false;
    this.linNonLin = false;
    this.zeroCheck = false;
    this.nTerms=2;                              // theta plus y-axis scale factor
    if(!this.scaleFlag)this.nTerms=1;           // scale factor fixed -> one fitted parameter
    this.degreesOfFreedom=this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

    // order data into ascending order of the abscissae
    Regression.sort(this.xData[0], this.yData, this.weight);

    // Estimate of theta: locate, from each end, where y first crosses half the
    // (ymax - ymin) height, then take the x midway between the two crossings
    double yymin = Fmath.minimum(this.yData);
    double yymax = Fmath.maximum(this.yData);
    int dirFlag = 1;                            // -1 flips the half-height test for negative-going data
    if(yymin<0)dirFlag=-1;
    double yyymid = (yymax - yymin)/2.0D;       // half-height threshold
    double yyxmidl = xData[0][0];
    int ii = 1;
    int nLen = this.yData.length;
    boolean test = true;
    // scan upwards from the low-x end for the first point at or above half height
    while(test){
        if(this.yData[ii]>=dirFlag*yyymid){
            yyxmidl = xData[0][ii];
            test = false;
        }
        else{
            ii++;
            if(ii>=nLen){
                // no crossing found - fall back to the mean abscissa
                yyxmidl = Stat.mean(this.xData[0]);
                ii=nLen-1;
                test = false;
            }
        }
    }
    double yyxmidh = xData[0][nLen-1];
    int jj = nLen-1;
    test = true;
    // scan downwards from the high-x end for the first point at or below half height
    while(test){
        if(this.yData[jj]<=dirFlag*yyymid){
            yyxmidh = xData[0][jj];
            test = false;
        }
        else{
            jj--;
            if(jj<0){
                // no crossing found - fall back to the mean abscissa
                yyxmidh = Stat.mean(this.xData[0]);
                jj=1;   // NOTE(review): jj=1 (not 0) after a failed scan looks asymmetric with the ii=nLen-1 branch - confirm intent
                test = false;
            }
        }
    }
    int thetaPos = (ii+jj)/2;                   // index midway between the two crossings
    double theta0 = xData[0][thetaPos];         // initial estimate of theta

    // initial estimates
    double[] start = new double[nTerms];
    start[0] = theta0;
    if(this.scaleFlag){
        // scale estimate: the extreme y value on the plateau side
        if(dirFlag==1){
            start[1] = yymax;
        }
        else{
            start[1] = yymin;
        }
    }

    // initial step sizes: 10% of each start value, with data-range fallbacks for zero starts
    double[] step = new double[nTerms];
    for(int i=0; i<nTerms; i++)step[i] = 0.1*start[i];
    if(step[0]==0.0D)step[0] = (xData[0][nLen-1] - xData[0][0])/20.0D;
    if(this.scaleFlag)if(step[1]==0.0D)step[1] = 0.1*(yData[nLen-1] - yData[0]);

    // Nelder and Mead Simplex Regression
    RectangularHyperbolaFunction f = new RectangularHyperbolaFunction();
    f.scaleOption = this.scaleFlag;
    f.scaleFactor = this.yScaleFactor;
    Object regFun2 = (Object)f;
    this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

    if(plotFlag==1){
        // Print results
        if(!this.supressPrint)this.print();

        // Plot results
        int flag = this.plotXY(f);
        if(flag!=-2 && !this.supressYYplot)this.plotYY();
    }
}
|
---|
9527 |
|
---|
9528 | // method for fitting data to a scaled Heaviside Step Function
|
---|
9529 | public void stepFunction(){
|
---|
9530 | fitStepFunction(0);
|
---|
9531 | }
|
---|
9532 |
|
---|
9533 | // method for fitting data to a scaled Heaviside Step Function with plot and print out
|
---|
9534 | public void stepFunctionPlot(){
|
---|
9535 | fitStepFunction(1);
|
---|
9536 | }
|
---|
9537 |
|
---|
9538 | // method for fitting data to a scaled Heaviside Step Function
|
---|
// Fits the data to a scaled Heaviside step function by Nelder-Mead simplex minimisation.
// plotFlag: 1 = print and plot the results after fitting, any other value = fit only.
// The initial-estimate logic is identical to fitRectangularHyperbola(): the step
// position theta is estimated from the half-height crossing points of the data.
protected void fitStepFunction(int plotFlag){

    if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
    this.lastMethod=27;                         // method identifier used by print()/plot routines
    this.userSupplied = false;
    this.linNonLin = false;
    this.zeroCheck = false;
    this.nTerms=2;                              // theta plus y-axis scale factor
    if(!this.scaleFlag)this.nTerms=1;           // scale factor fixed -> one fitted parameter
    this.degreesOfFreedom=this.nData - this.nTerms;
    if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");

    // order data into ascending order of the abscissae
    Regression.sort(this.xData[0], this.yData, this.weight);

    // Estimate of theta: locate, from each end, where y first crosses half the
    // (ymax - ymin) height, then take the x midway between the two crossings
    double yymin = Fmath.minimum(this.yData);
    double yymax = Fmath.maximum(this.yData);
    int dirFlag = 1;                            // -1 flips the half-height test for negative-going data
    if(yymin<0)dirFlag=-1;
    double yyymid = (yymax - yymin)/2.0D;       // half-height threshold
    double yyxmidl = xData[0][0];
    int ii = 1;
    int nLen = this.yData.length;
    boolean test = true;
    // scan upwards from the low-x end for the first point at or above half height
    while(test){
        if(this.yData[ii]>=dirFlag*yyymid){
            yyxmidl = xData[0][ii];
            test = false;
        }
        else{
            ii++;
            if(ii>=nLen){
                // no crossing found - fall back to the mean abscissa
                yyxmidl = Stat.mean(this.xData[0]);
                ii=nLen-1;
                test = false;
            }
        }
    }
    double yyxmidh = xData[0][nLen-1];
    int jj = nLen-1;
    test = true;
    // scan downwards from the high-x end for the first point at or below half height
    while(test){
        if(this.yData[jj]<=dirFlag*yyymid){
            yyxmidh = xData[0][jj];
            test = false;
        }
        else{
            jj--;
            if(jj<0){
                // no crossing found - fall back to the mean abscissa
                yyxmidh = Stat.mean(this.xData[0]);
                jj=1;   // NOTE(review): jj=1 (not 0) after a failed scan looks asymmetric with the ii=nLen-1 branch - confirm intent
                test = false;
            }
        }
    }
    int thetaPos = (ii+jj)/2;                   // index midway between the two crossings
    double theta0 = xData[0][thetaPos];         // initial estimate of the step position

    // initial estimates
    double[] start = new double[nTerms];
    start[0] = theta0;
    if(this.scaleFlag){
        // scale estimate: the extreme y value on the plateau side
        if(dirFlag==1){
            start[1] = yymax;
        }
        else{
            start[1] = yymin;
        }
    }

    // initial step sizes: 10% of each start value, with data-range fallbacks for zero starts
    double[] step = new double[nTerms];
    for(int i=0; i<nTerms; i++)step[i] = 0.1*start[i];
    if(step[0]==0.0D)step[0] = (xData[0][nLen-1] - xData[0][0])/20.0D;
    if(this.scaleFlag)if(step[1]==0.0D)step[1] = 0.1*(yData[nLen-1] - yData[0]);

    // Nelder and Mead Simplex Regression
    StepFunctionFunction f = new StepFunctionFunction();
    f.scaleOption = this.scaleFlag;
    f.scaleFactor = this.yScaleFactor;
    Object regFun2 = (Object)f;
    this.nelderMead(regFun2, start, step, this.fTol, this.nMax);

    if(plotFlag==1){
        // Print results
        if(!this.supressPrint)this.print();

        // Plot results
        int flag = this.plotXY(f);
        if(flag!=-2 && !this.supressYYplot)this.plotYY();
    }
}
|
---|
9632 |
|
---|
9633 | // Fit to a Logistic
|
---|
9634 | public void logistic(){
|
---|
9635 | this.fitLogistic(0);
|
---|
9636 | }
|
---|
9637 |
|
---|
9638 | // Fit to a Logistic
|
---|
9639 | public void logisticPlot(){
|
---|
9640 |
|
---|
9641 | this.fitLogistic(1);
|
---|
9642 | }
|
---|
9643 |
|
---|
9644 | // Fit data to a Logistic probability function
|
---|
9645 | protected void fitLogistic(int plotFlag){
|
---|
9646 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
9647 | this.lastMethod=30;
|
---|
9648 | this.userSupplied = false;
|
---|
9649 | this.linNonLin = false;
|
---|
9650 | this.zeroCheck = false;
|
---|
9651 | this.nTerms=3;
|
---|
9652 | if(!this.scaleFlag)this.nTerms=2;
|
---|
9653 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
9654 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
9655 |
|
---|
9656 | // order data into ascending order of the abscissae
|
---|
9657 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
9658 |
|
---|
9659 | // check sign of y data
|
---|
9660 | Double tempd=null;
|
---|
9661 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
9662 | tempd = (Double)retY.get(4);
|
---|
9663 | double yPeak = tempd.doubleValue();
|
---|
9664 | boolean yFlag = false;
|
---|
9665 | if(yPeak<0.0D){
|
---|
9666 | System.out.println("Regression.fitLogistic(): This implementation of the Logistic distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
9667 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
9668 | for(int i =0; i<this.nData; i++){
|
---|
9669 | yData[i] = -yData[i];
|
---|
9670 | }
|
---|
9671 | retY = Regression.dataSign(yData);
|
---|
9672 | yFlag=true;
|
---|
9673 | }
|
---|
9674 |
|
---|
9675 | // Calculate x value at peak y (estimate of the Logistic mean)
|
---|
9676 | ArrayList<Object> ret1 = Regression.dataSign(yData);
|
---|
9677 | Integer tempi = null;
|
---|
9678 | tempi = (Integer)ret1.get(5);
|
---|
9679 | int peaki = tempi.intValue();
|
---|
9680 | double mu = xData[0][peaki];
|
---|
9681 |
|
---|
9682 | // Calculate an estimate of the beta
|
---|
9683 | double beta = Math.sqrt(6.0D)*halfWidth(xData[0], yData)/Math.PI;
|
---|
9684 |
|
---|
9685 | // Calculate estimate of y scale
|
---|
9686 | tempd = (Double)ret1.get(4);
|
---|
9687 | double ym = tempd.doubleValue();
|
---|
9688 | ym=ym*beta*Math.sqrt(2.0D*Math.PI);
|
---|
9689 |
|
---|
9690 | // Fill arrays needed by the Simplex
|
---|
9691 | double[] start = new double[this.nTerms];
|
---|
9692 | double[] step = new double[this.nTerms];
|
---|
9693 | start[0] = mu;
|
---|
9694 | start[1] = beta;
|
---|
9695 | if(this.scaleFlag){
|
---|
9696 | start[2] = ym;
|
---|
9697 | }
|
---|
9698 | step[0] = 0.1D*beta;
|
---|
9699 | step[1] = 0.1D*start[1];
|
---|
9700 | if(step[1]==0.0D){
|
---|
9701 | ArrayList<Object> ret0 = Regression.dataSign(xData[0]);
|
---|
9702 | Double tempdd = null;
|
---|
9703 | tempdd = (Double)ret0.get(2);
|
---|
9704 | double xmax = tempdd.doubleValue();
|
---|
9705 | if(xmax==0.0D){
|
---|
9706 | tempdd = (Double)ret0.get(0);
|
---|
9707 | xmax = tempdd.doubleValue();
|
---|
9708 | }
|
---|
9709 | step[0]=xmax*0.1D;
|
---|
9710 | }
|
---|
9711 | if(this.scaleFlag)step[2] = 0.1D*start[2];
|
---|
9712 |
|
---|
9713 | // Nelder and Mead Simplex Regression
|
---|
9714 | LogisticFunction f = new LogisticFunction();
|
---|
9715 | this.addConstraint(1,-1,0.0D);
|
---|
9716 | f.scaleOption = this.scaleFlag;
|
---|
9717 | f.scaleFactor = this.yScaleFactor;
|
---|
9718 | Object regFun2 = (Object)f;
|
---|
9719 | this.nelderMead(regFun2, start, step, this.fTol, this.nMax);
|
---|
9720 |
|
---|
9721 | if(plotFlag==1){
|
---|
9722 | // Print results
|
---|
9723 | if(!this.supressPrint)this.print();
|
---|
9724 |
|
---|
9725 | // Plot results
|
---|
9726 | int flag = this.plotXY(f);
|
---|
9727 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
9728 | }
|
---|
9729 |
|
---|
9730 | if(yFlag){
|
---|
9731 | // restore data
|
---|
9732 | for(int i=0; i<this.nData-1; i++){
|
---|
9733 | this.yData[i]=-this.yData[i];
|
---|
9734 | }
|
---|
9735 | }
|
---|
9736 |
|
---|
9737 | }
|
---|
9738 |
|
---|
9739 | public void beta(){
|
---|
9740 | this.fitBeta(0, 0);
|
---|
9741 | }
|
---|
9742 |
|
---|
9743 | public void betaPlot(){
|
---|
9744 | this.fitBeta(1, 0);
|
---|
9745 | }
|
---|
9746 |
|
---|
9747 | public void betaMinMax(){
|
---|
9748 | this.fitBeta(0, 1);
|
---|
9749 | }
|
---|
9750 |
|
---|
9751 | public void betaMinMaxPlot(){
|
---|
9752 | this.fitBeta(1, 1);
|
---|
9753 | }
|
---|
9754 |
|
---|
9755 | protected void fitBeta(int allTest, int typeFlag){
|
---|
9756 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
9757 | this.userSupplied = false;
|
---|
9758 | switch(typeFlag){
|
---|
9759 | case 0: this.lastMethod=31;
|
---|
9760 | this.nTerms=3;
|
---|
9761 | break;
|
---|
9762 | case 1: this.lastMethod=32;
|
---|
9763 | this.nTerms=5;
|
---|
9764 | break;
|
---|
9765 | }
|
---|
9766 | if(!this.scaleFlag)this.nTerms=this.nTerms-1;
|
---|
9767 |
|
---|
9768 | this.zeroCheck = false;
|
---|
9769 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
9770 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
9771 |
|
---|
9772 | // order data into ascending order of the abscissae
|
---|
9773 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
9774 |
|
---|
9775 | // check sign of y data
|
---|
9776 | Double tempd=null;
|
---|
9777 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
9778 | tempd = (Double)retY.get(4);
|
---|
9779 | double yPeak = tempd.doubleValue();
|
---|
9780 | boolean yFlag = false;
|
---|
9781 | if(yPeak<0.0D){
|
---|
9782 | System.out.println("Regression.fitBeta(): This implementation of the Beta distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
9783 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
9784 | for(int i =0; i<this.nData; i++){
|
---|
9785 | yData[i] = -yData[i];
|
---|
9786 | }
|
---|
9787 | retY = Regression.dataSign(yData);
|
---|
9788 | yFlag=true;
|
---|
9789 | }
|
---|
9790 |
|
---|
9791 | // check x data
|
---|
9792 | ArrayList<Object> retX = Regression.dataSign(xData[0]);
|
---|
9793 | Integer tempi = null;
|
---|
9794 |
|
---|
9795 | // Calculate x value at peak y (estimate of the 'distribution mode')
|
---|
9796 | tempi = (Integer)retY.get(5);
|
---|
9797 | int peaki = tempi.intValue();
|
---|
9798 | double distribMode = xData[0][peaki];
|
---|
9799 |
|
---|
9800 | // minimum value
|
---|
9801 | tempd = (Double)retX.get(0);
|
---|
9802 | double minX = tempd.doubleValue();
|
---|
9803 | // maximum value
|
---|
9804 | tempd = (Double)retX.get(2);
|
---|
9805 | double maxX = tempd.doubleValue();
|
---|
9806 | // mean value
|
---|
9807 | tempd = (Double)retX.get(8);
|
---|
9808 | double meanX = tempd.doubleValue();
|
---|
9809 |
|
---|
9810 |
|
---|
9811 | // test that data is within range
|
---|
9812 | if(typeFlag==0){
|
---|
9813 | if(minX<0.0D){
|
---|
9814 | System.out.println("Regression: beta: data points must be greater than or equal to 0");
|
---|
9815 | System.out.println("method betaMinMax used in place of method beta");
|
---|
9816 | typeFlag = 1;
|
---|
9817 | this.lastMethod=32;
|
---|
9818 | this.nTerms=5;
|
---|
9819 | }
|
---|
9820 | if(maxX>1.0D){
|
---|
9821 | System.out.println("Regression: beta: data points must be less than or equal to 1");
|
---|
9822 | System.out.println("method betaMinMax used in place of method beta");
|
---|
9823 | typeFlag = 1;
|
---|
9824 | this.lastMethod=32;
|
---|
9825 | this.nTerms=5;
|
---|
9826 | }
|
---|
9827 | }
|
---|
9828 |
|
---|
9829 | // Calculate an estimate of the alpha, beta and scale factor
|
---|
9830 | double dMode = distribMode;
|
---|
9831 | double dMean = meanX;
|
---|
9832 | if(typeFlag==1){
|
---|
9833 | dMode = (distribMode - minX*0.9D)/(maxX*1.2D - minX*0.9D);
|
---|
9834 | dMean = (meanX - minX*0.9D)/(maxX*1.2D - minX*0.9D);
|
---|
9835 | }
|
---|
9836 | double alphaGuess = 2.0D*dMode*dMean/(dMode - dMean);
|
---|
9837 | if(alphaGuess<1.3)alphaGuess = 1.6D;
|
---|
9838 | double betaGuess = alphaGuess*(1.0D - dMean)/dMean;
|
---|
9839 | if(betaGuess<=1.3)betaGuess = 1.6D;
|
---|
9840 | double scaleGuess = 0.0D;
|
---|
9841 | if(typeFlag==0){
|
---|
9842 | scaleGuess = yPeak/Stat.betaPDF(alphaGuess, betaGuess, distribMode);
|
---|
9843 | }
|
---|
9844 | else{
|
---|
9845 | scaleGuess = yPeak/Stat.betaPDF(minX, maxX, alphaGuess, betaGuess, distribMode);
|
---|
9846 | }
|
---|
9847 | if(scaleGuess<0)scaleGuess=1;
|
---|
9848 |
|
---|
9849 |
|
---|
9850 | // Nelder and Mead Simplex Regression for Gumbel
|
---|
9851 | // Fill arrays needed by the Simplex
|
---|
9852 | double[] start = new double[this.nTerms];
|
---|
9853 | double[] step = new double[this.nTerms];
|
---|
9854 | switch(typeFlag){
|
---|
9855 | case 0: start[0] = alphaGuess; //alpha
|
---|
9856 | start[1] = betaGuess; //beta
|
---|
9857 | if(this.scaleFlag){
|
---|
9858 | start[2] = scaleGuess; //y axis scaling factor
|
---|
9859 | }
|
---|
9860 | step[0] = 0.1D*start[0];
|
---|
9861 | step[1] = 0.1D*start[1];
|
---|
9862 | if(this.scaleFlag)step[2] = 0.1D*start[2];
|
---|
9863 |
|
---|
9864 | // Add constraints
|
---|
9865 | this.addConstraint(0,-1,1.0D);
|
---|
9866 | this.addConstraint(1,-1,1.0D);
|
---|
9867 | break;
|
---|
9868 | case 1: start[0] = alphaGuess; //alpha
|
---|
9869 | start[1] = betaGuess; //beta
|
---|
9870 | start[2] = 0.9D*minX; // min
|
---|
9871 | start[3] = 1.1D*maxX; // max
|
---|
9872 | if(this.scaleFlag){
|
---|
9873 | start[4] = scaleGuess; //y axis scaling factor
|
---|
9874 | }
|
---|
9875 | step[0] = 0.1D*start[0];
|
---|
9876 | step[1] = 0.1D*start[1];
|
---|
9877 | step[2] = 0.1D*start[2];
|
---|
9878 | step[3] = 0.1D*start[3];
|
---|
9879 | if(this.scaleFlag)step[4] = 0.1D*start[4];
|
---|
9880 |
|
---|
9881 | // Add constraints
|
---|
9882 | this.addConstraint(0,-1,1.0D);
|
---|
9883 | this.addConstraint(1,-1,1.0D);
|
---|
9884 | this.addConstraint(2,+1,minX);
|
---|
9885 | this.addConstraint(3,-1,maxX);
|
---|
9886 | break;
|
---|
9887 |
|
---|
9888 | }
|
---|
9889 |
|
---|
9890 | // Create instance of Beta function
|
---|
9891 | BetaFunction ff = new BetaFunction();
|
---|
9892 |
|
---|
9893 | // Set minimum maximum type option
|
---|
9894 | ff.typeFlag = typeFlag;
|
---|
9895 |
|
---|
9896 | // Set ordinate scaling option
|
---|
9897 | ff.scaleOption = this.scaleFlag;
|
---|
9898 | ff.scaleFactor = this.yScaleFactor;
|
---|
9899 |
|
---|
9900 | // Perform simplex regression
|
---|
9901 | Object regFun3 = (Object)ff;
|
---|
9902 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
9903 |
|
---|
9904 | if(allTest==1){
|
---|
9905 | // Print results
|
---|
9906 | if(!this.supressPrint)this.print();
|
---|
9907 |
|
---|
9908 | // Plot results
|
---|
9909 | int flag = this.plotXY(ff);
|
---|
9910 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
9911 | }
|
---|
9912 |
|
---|
9913 | if(yFlag){
|
---|
9914 | // restore data
|
---|
9915 | for(int i=0; i<this.nData-1; i++){
|
---|
9916 | this.yData[i]=-this.yData[i];
|
---|
9917 | }
|
---|
9918 | }
|
---|
9919 | }
|
---|
9920 |
|
---|
9921 | public void gamma(){
|
---|
9922 | this.fitGamma(0, 0);
|
---|
9923 | }
|
---|
9924 |
|
---|
9925 | public void gammaPlot(){
|
---|
9926 | this.fitGamma(1, 0);
|
---|
9927 | }
|
---|
9928 |
|
---|
9929 | public void gammaStandard(){
|
---|
9930 | this.fitGamma(0, 1);
|
---|
9931 | }
|
---|
9932 |
|
---|
9933 | public void gammaStandardPlot(){
|
---|
9934 | this.fitGamma(1, 1);
|
---|
9935 | }
|
---|
9936 |
|
---|
9937 | protected void fitGamma(int allTest, int typeFlag){
|
---|
9938 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
9939 | this.userSupplied = false;
|
---|
9940 | switch(typeFlag){
|
---|
9941 | case 0: this.lastMethod=33;
|
---|
9942 | this.nTerms=4;
|
---|
9943 | break;
|
---|
9944 | case 1: this.lastMethod=34;
|
---|
9945 | this.nTerms=2;
|
---|
9946 | break;
|
---|
9947 | }
|
---|
9948 | if(!this.scaleFlag)this.nTerms=this.nTerms-1;
|
---|
9949 |
|
---|
9950 | this.zeroCheck = false;
|
---|
9951 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
9952 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
9953 |
|
---|
9954 | // order data into ascending order of the abscissae
|
---|
9955 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
9956 |
|
---|
9957 | // check sign of y data
|
---|
9958 | Double tempd=null;
|
---|
9959 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
9960 | tempd = (Double)retY.get(4);
|
---|
9961 | double yPeak = tempd.doubleValue();
|
---|
9962 | boolean yFlag = false;
|
---|
9963 | if(yPeak<0.0D){
|
---|
9964 | System.out.println("Regression.fitGamma(): This implementation of the Gamma distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
9965 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
9966 | for(int i =0; i<this.nData; i++){
|
---|
9967 | yData[i] = -yData[i];
|
---|
9968 | }
|
---|
9969 | retY = Regression.dataSign(yData);
|
---|
9970 | yFlag=true;
|
---|
9971 | }
|
---|
9972 |
|
---|
9973 | // check x data
|
---|
9974 | ArrayList<Object> retX = Regression.dataSign(xData[0]);
|
---|
9975 | Integer tempi = null;
|
---|
9976 |
|
---|
9977 | // Calculate x value at peak y (estimate of the 'distribution mode')
|
---|
9978 | tempi = (Integer)retY.get(5);
|
---|
9979 | int peaki = tempi.intValue();
|
---|
9980 | double distribMode = xData[0][peaki];
|
---|
9981 |
|
---|
9982 | // minimum value
|
---|
9983 | tempd = (Double)retX.get(0);
|
---|
9984 | double minX = tempd.doubleValue();
|
---|
9985 | // maximum value
|
---|
9986 | tempd = (Double)retX.get(2);
|
---|
9987 | double maxX = tempd.doubleValue();
|
---|
9988 | // mean value
|
---|
9989 | tempd = (Double)retX.get(8);
|
---|
9990 | double meanX = tempd.doubleValue();
|
---|
9991 |
|
---|
9992 |
|
---|
9993 | // test that data is within range
|
---|
9994 | if(typeFlag==1){
|
---|
9995 | if(minX<0.0D){
|
---|
9996 | System.out.println("Regression: gammaStandard: data points must be greater than or equal to 0");
|
---|
9997 | System.out.println("method gamma used in place of method gammaStandard");
|
---|
9998 | typeFlag = 0;
|
---|
9999 | this.lastMethod=33;
|
---|
10000 | this.nTerms=2;
|
---|
10001 | }
|
---|
10002 | }
|
---|
10003 |
|
---|
10004 | // Calculate an estimate of the mu, beta, gamma and scale factor
|
---|
10005 | double muGuess = 0.8D*minX;
|
---|
10006 | if(muGuess==0.0D)muGuess = -0.1D;
|
---|
10007 | double betaGuess = meanX - distribMode;
|
---|
10008 | if(betaGuess<=0.0D)betaGuess = 1.0D;
|
---|
10009 | double gammaGuess = (meanX + muGuess)/betaGuess;
|
---|
10010 | if(typeFlag==1)gammaGuess = meanX;
|
---|
10011 | if(gammaGuess<=0.0D)gammaGuess = 1.0D;
|
---|
10012 | double scaleGuess = 0.0D;
|
---|
10013 | if(typeFlag==0){
|
---|
10014 | scaleGuess = yPeak/Stat.gammaPDF(muGuess, betaGuess, gammaGuess, distribMode);
|
---|
10015 | }
|
---|
10016 | else{
|
---|
10017 | scaleGuess = yPeak/Stat.gammaPDF(gammaGuess, distribMode);
|
---|
10018 | }
|
---|
10019 | if(scaleGuess<0)scaleGuess=1;
|
---|
10020 |
|
---|
10021 |
|
---|
10022 | // Nelder and Mead Simplex Regression for Gamma
|
---|
10023 | // Fill arrays needed by the Simplex
|
---|
10024 | double[] start = new double[this.nTerms];
|
---|
10025 | double[] step = new double[this.nTerms];
|
---|
10026 | switch(typeFlag){
|
---|
10027 | case 1: start[0] = gammaGuess; //gamma
|
---|
10028 | if(this.scaleFlag){
|
---|
10029 | start[1] = scaleGuess; //y axis scaling factor
|
---|
10030 | }
|
---|
10031 | step[0] = 0.1D*start[0];
|
---|
10032 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10033 |
|
---|
10034 | // Add constraints
|
---|
10035 | this.addConstraint(0,-1,0.0D);
|
---|
10036 | break;
|
---|
10037 | case 0: start[0] = muGuess; // mu
|
---|
10038 | start[1] = betaGuess; // beta
|
---|
10039 | start[2] = gammaGuess; // gamma
|
---|
10040 | if(this.scaleFlag){
|
---|
10041 | start[3] = scaleGuess; //y axis scaling factor
|
---|
10042 | }
|
---|
10043 | step[0] = 0.1D*start[0];
|
---|
10044 | step[1] = 0.1D*start[1];
|
---|
10045 | step[2] = 0.1D*start[2];
|
---|
10046 | if(this.scaleFlag)step[3] = 0.1D*start[3];
|
---|
10047 |
|
---|
10048 | // Add constraints
|
---|
10049 | this.addConstraint(1,-1,0.0D);
|
---|
10050 | this.addConstraint(2,-1,0.0D);
|
---|
10051 | break;
|
---|
10052 | }
|
---|
10053 |
|
---|
10054 | // Create instance of Gamma function
|
---|
10055 | GammaFunction ff = new GammaFunction();
|
---|
10056 |
|
---|
10057 | // Set type option
|
---|
10058 | ff.typeFlag = typeFlag;
|
---|
10059 |
|
---|
10060 | // Set ordinate scaling option
|
---|
10061 | ff.scaleOption = this.scaleFlag;
|
---|
10062 | ff.scaleFactor = this.yScaleFactor;
|
---|
10063 |
|
---|
10064 | // Perform simplex regression
|
---|
10065 | Object regFun3 = (Object)ff;
|
---|
10066 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
10067 |
|
---|
10068 | if(allTest==1){
|
---|
10069 | // Print results
|
---|
10070 | if(!this.supressPrint)this.print();
|
---|
10071 |
|
---|
10072 | // Plot results
|
---|
10073 | int flag = this.plotXY(ff);
|
---|
10074 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
10075 | }
|
---|
10076 |
|
---|
10077 | if(yFlag){
|
---|
10078 | // restore data
|
---|
10079 | for(int i=0; i<this.nData-1; i++){
|
---|
10080 | this.yData[i]=-this.yData[i];
|
---|
10081 | }
|
---|
10082 | }
|
---|
10083 | }
|
---|
10084 |
|
---|
10085 | // Fit to an Erlang Distribution
|
---|
10086 | public void erlang(){
|
---|
10087 | this.fitErlang(0, 0);
|
---|
10088 | }
|
---|
10089 |
|
---|
10090 | public void erlangPlot(){
|
---|
10091 | this.fitErlang(1, 0);
|
---|
10092 | }
|
---|
10093 |
|
---|
10094 | protected void fitErlang(int allTest, int typeFlag){
|
---|
10095 | if(this.multipleY)throw new IllegalArgumentException("This method cannot handle multiply dimensioned y arrays");
|
---|
10096 | this.lastMethod=35;
|
---|
10097 | this.userSupplied = false;
|
---|
10098 | int nTerms0 = 2; // number of erlang terms
|
---|
10099 | int nTerms1 = 4; // number of gamma terms - initial estimates procedure
|
---|
10100 | this.nTerms = nTerms1;
|
---|
10101 | if(!this.scaleFlag)this.nTerms=this.nTerms-1;
|
---|
10102 |
|
---|
10103 | this.zeroCheck = false;
|
---|
10104 | this.degreesOfFreedom=this.nData - this.nTerms;
|
---|
10105 | if(this.degreesOfFreedom<1 && !this.ignoreDofFcheck)throw new IllegalArgumentException("Degrees of freedom must be greater than 0");
|
---|
10106 |
|
---|
10107 | // order data into ascending order of the abscissae
|
---|
10108 | Regression.sort(this.xData[0], this.yData, this.weight);
|
---|
10109 |
|
---|
10110 | // check sign of y data
|
---|
10111 | Double tempd=null;
|
---|
10112 | ArrayList<Object> retY = Regression.dataSign(yData);
|
---|
10113 | tempd = (Double)retY.get(4);
|
---|
10114 | double yPeak = tempd.doubleValue();
|
---|
10115 | boolean yFlag = false;
|
---|
10116 | if(yPeak<0.0D){
|
---|
10117 | System.out.println("Regression.fitGamma(): This implementation of the Erlang distribution takes only positive y values\n(noise taking low values below zero are allowed)");
|
---|
10118 | System.out.println("All y values have been multiplied by -1 before fitting");
|
---|
10119 | for(int i =0; i<this.nData; i++){
|
---|
10120 | yData[i] = -yData[i];
|
---|
10121 | }
|
---|
10122 | retY = Regression.dataSign(yData);
|
---|
10123 | yFlag=true;
|
---|
10124 | }
|
---|
10125 |
|
---|
10126 | // check x data
|
---|
10127 | ArrayList<Object> retX = Regression.dataSign(xData[0]);
|
---|
10128 | Integer tempi = null;
|
---|
10129 |
|
---|
10130 | // Calculate x value at peak y (estimate of the 'distribution mode')
|
---|
10131 | tempi = (Integer)retY.get(5);
|
---|
10132 | int peaki = tempi.intValue();
|
---|
10133 | double distribMode = xData[0][peaki];
|
---|
10134 |
|
---|
10135 | // minimum value
|
---|
10136 | tempd = (Double)retX.get(0);
|
---|
10137 | double minX = tempd.doubleValue();
|
---|
10138 | // maximum value
|
---|
10139 | tempd = (Double)retX.get(2);
|
---|
10140 | double maxX = tempd.doubleValue();
|
---|
10141 | // mean value
|
---|
10142 | tempd = (Double)retX.get(8);
|
---|
10143 | double meanX = tempd.doubleValue();
|
---|
10144 |
|
---|
10145 |
|
---|
10146 | // test that data is within range
|
---|
10147 | if(minX<0.0D)throw new IllegalArgumentException("data points must be greater than or equal to 0");
|
---|
10148 |
|
---|
10149 | // FIT TO GAMMA DISTRIBUTION TO OBTAIN INITIAL ESTIMATES
|
---|
10150 | // Calculate an estimate of the mu, beta, gamma and scale factor
|
---|
10151 | double muGuess = 0.8D*minX;
|
---|
10152 | if(muGuess==0.0D)muGuess = -0.1D;
|
---|
10153 | double betaGuess = meanX - distribMode;
|
---|
10154 | if(betaGuess<=0.0D)betaGuess = 1.0D;
|
---|
10155 | double gammaGuess = (meanX + muGuess)/betaGuess;
|
---|
10156 | if(typeFlag==1)gammaGuess = meanX;
|
---|
10157 | if(gammaGuess<=0.0D)gammaGuess = 1.0D;
|
---|
10158 | double scaleGuess = 0.0D;
|
---|
10159 | scaleGuess = yPeak/Stat.gammaPDF(muGuess, betaGuess, gammaGuess, distribMode);
|
---|
10160 | if(scaleGuess<0)scaleGuess=1;
|
---|
10161 |
|
---|
10162 |
|
---|
10163 | // Nelder and Mead Simplex Regression for Gamma
|
---|
10164 | // Fill arrays needed by the Simplex
|
---|
10165 | double[] start = new double[this.nTerms];
|
---|
10166 | double[] step = new double[this.nTerms];
|
---|
10167 | start[0] = muGuess; // mu
|
---|
10168 | start[1] = betaGuess; // beta
|
---|
10169 | start[2] = gammaGuess; // gamma
|
---|
10170 | if(this.scaleFlag)start[3] = scaleGuess; //y axis scaling factor
|
---|
10171 |
|
---|
10172 | step[0] = 0.1D*start[0];
|
---|
10173 | step[1] = 0.1D*start[1];
|
---|
10174 | step[2] = 0.1D*start[2];
|
---|
10175 | if(this.scaleFlag)step[3] = 0.1D*start[3];
|
---|
10176 |
|
---|
10177 | // Add constraints
|
---|
10178 | this.addConstraint(1,-1,0.0D);
|
---|
10179 | this.addConstraint(2,-1,0.0D);
|
---|
10180 |
|
---|
10181 | // Create instance of Gamma function
|
---|
10182 | GammaFunction ff = new GammaFunction();
|
---|
10183 |
|
---|
10184 | // Set type option
|
---|
10185 | ff.typeFlag = typeFlag;
|
---|
10186 |
|
---|
10187 | // Set ordinate scaling option
|
---|
10188 | ff.scaleOption = this.scaleFlag;
|
---|
10189 | ff.scaleFactor = this.yScaleFactor;
|
---|
10190 |
|
---|
10191 | // Perform simplex regression
|
---|
10192 | Object regFun3 = (Object)ff;
|
---|
10193 | this.nelderMead(regFun3, start, step, this.fTol, this.nMax);
|
---|
10194 |
|
---|
10195 | // FIT TO ERLANG DISTRIBUTION USING GAMMA BEST ESTIMATES AS INITIAL ESTIMATES
|
---|
10196 | // AND VARYING RATE PARAMETER BY UNIT STEPS
|
---|
10197 | this.removeConstraints();
|
---|
10198 |
|
---|
10199 | // Initial estimates
|
---|
10200 | double[] bestGammaEst = this.getCoeff();
|
---|
10201 |
|
---|
10202 | // Swap from Gamma dimensions to Erlang dimensions
|
---|
10203 | this.nTerms = nTerms0;
|
---|
10204 | start = new double[this.nTerms];
|
---|
10205 | step = new double[this.nTerms];
|
---|
10206 | if(bestGammaEst[3]<0.0)bestGammaEst[3] *= -1.0;
|
---|
10207 |
|
---|
10208 | // initial estimates
|
---|
10209 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10210 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10211 |
|
---|
10212 | step[0] = 0.1D*start[0];
|
---|
10213 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10214 |
|
---|
10215 | // Add constraints
|
---|
10216 | this.addConstraint(0,-1,0.0D);
|
---|
10217 |
|
---|
10218 | // fix initial integer rate parameter
|
---|
10219 | double kay0 = Math.round(bestGammaEst[2]);
|
---|
10220 | double kay = kay0;
|
---|
10221 |
|
---|
10222 | // Create instance of Erlang function
|
---|
10223 | ErlangFunction ef = new ErlangFunction();
|
---|
10224 |
|
---|
10225 | // Set ordinate scaling option
|
---|
10226 | ef.scaleOption = this.scaleFlag;
|
---|
10227 | ef.scaleFactor = this.yScaleFactor;
|
---|
10228 | ef.kay = kay;
|
---|
10229 |
|
---|
10230 | // Fit stepping up
|
---|
10231 | boolean testKay = true;
|
---|
10232 | double ssMin = Double.NaN;
|
---|
10233 | double upSS = Double.NaN;
|
---|
10234 | double upKay = Double.NaN;
|
---|
10235 | double kayFinal = Double.NaN;
|
---|
10236 | int iStart = 1;
|
---|
10237 | int ssSame = 0;
|
---|
10238 |
|
---|
10239 | while(testKay){
|
---|
10240 |
|
---|
10241 | // Perform simplex regression
|
---|
10242 | Object regFun4 = (Object)ef;
|
---|
10243 |
|
---|
10244 | this.nelderMead(regFun4, start, step, this.fTol, this.nMax);
|
---|
10245 | double sumOfSquaresError = this.getSumOfSquares();
|
---|
10246 | if(iStart==1){
|
---|
10247 | iStart = 2;
|
---|
10248 | ssMin = sumOfSquaresError;
|
---|
10249 | kay = kay + 1;
|
---|
10250 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10251 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10252 | step[0] = 0.1D*start[0];
|
---|
10253 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10254 | this.addConstraint(0,-1,0.0D);
|
---|
10255 | ef.kay = kay;
|
---|
10256 | }
|
---|
10257 | else{
|
---|
10258 | if(sumOfSquaresError<=ssMin){
|
---|
10259 | if(sumOfSquaresError==ssMin){
|
---|
10260 | ssSame++;
|
---|
10261 | if(ssSame==10){
|
---|
10262 | upSS = ssMin;
|
---|
10263 | upKay = kay - 5;
|
---|
10264 | testKay = false;
|
---|
10265 | }
|
---|
10266 | }
|
---|
10267 | ssMin = sumOfSquaresError;
|
---|
10268 | kay = kay + 1;
|
---|
10269 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10270 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10271 | step[0] = 0.1D*start[0];
|
---|
10272 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10273 | this.addConstraint(0,-1,0.0D);
|
---|
10274 | ef.kay = kay;
|
---|
10275 | }
|
---|
10276 | else{
|
---|
10277 | upSS = ssMin;
|
---|
10278 | upKay = kay - 1;
|
---|
10279 | testKay = false;
|
---|
10280 | }
|
---|
10281 | }
|
---|
10282 | }
|
---|
10283 |
|
---|
10284 | if(kay0==1){
|
---|
10285 | kayFinal = upKay;
|
---|
10286 | }
|
---|
10287 | else{
|
---|
10288 |
|
---|
10289 | // Fit stepping down
|
---|
10290 | iStart = 1;
|
---|
10291 | testKay = true;
|
---|
10292 | ssMin = Double.NaN;
|
---|
10293 | double downSS = Double.NaN;
|
---|
10294 | double downKay = Double.NaN;
|
---|
10295 | // initial estimates
|
---|
10296 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10297 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10298 | step[0] = 0.1D*start[0];
|
---|
10299 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10300 | // Add constraints
|
---|
10301 | this.addConstraint(0,-1,0.0D);
|
---|
10302 | kay = kay0;
|
---|
10303 | ef.kay = kay;
|
---|
10304 |
|
---|
10305 | while(testKay){
|
---|
10306 |
|
---|
10307 | // Perform simplex regression
|
---|
10308 | Object regFun5 = (Object)ef;
|
---|
10309 |
|
---|
10310 | this.nelderMead(regFun5, start, step, this.fTol, this.nMax);
|
---|
10311 | double sumOfSquaresError = this.getSumOfSquares();
|
---|
10312 | if(iStart==1){
|
---|
10313 | iStart = 2;
|
---|
10314 | ssMin = sumOfSquaresError;
|
---|
10315 | kay = kay - 1;
|
---|
10316 | if(Math.rint(kay)<1L){
|
---|
10317 | downSS = ssMin;
|
---|
10318 | downKay = kay + 1;
|
---|
10319 | testKay = false;
|
---|
10320 | }
|
---|
10321 | else{
|
---|
10322 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10323 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10324 | step[0] = 0.1D*start[0];
|
---|
10325 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10326 | this.addConstraint(0,-1,0.0D);
|
---|
10327 | ef.kay = kay;
|
---|
10328 | }
|
---|
10329 | }
|
---|
10330 | else{
|
---|
10331 | if(sumOfSquaresError<=ssMin){
|
---|
10332 | ssMin = sumOfSquaresError;
|
---|
10333 | kay = kay - 1;
|
---|
10334 | if(Math.rint(kay)<1L){
|
---|
10335 | downSS = ssMin;
|
---|
10336 | downKay = kay + 1;
|
---|
10337 | testKay = false;
|
---|
10338 | }
|
---|
10339 | else{
|
---|
10340 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10341 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10342 | step[0] = 0.1D*start[0];
|
---|
10343 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10344 | this.addConstraint(0,-1,0.0D);
|
---|
10345 | ef.kay = kay;
|
---|
10346 | }
|
---|
10347 | }
|
---|
10348 | else{
|
---|
10349 | downSS = ssMin;
|
---|
10350 | downKay = kay + 1;
|
---|
10351 | testKay = false;
|
---|
10352 | }
|
---|
10353 | }
|
---|
10354 |
|
---|
10355 | }
|
---|
10356 | if(downSS<upSS){
|
---|
10357 | kayFinal = downKay;
|
---|
10358 | }
|
---|
10359 | else{
|
---|
10360 | kayFinal = upKay;
|
---|
10361 | }
|
---|
10362 |
|
---|
10363 | }
|
---|
10364 |
|
---|
10365 | // Penultimate fit
|
---|
10366 | // initial estimates
|
---|
10367 | start[0] = 1.0D/bestGammaEst[1]; // lambda
|
---|
10368 | if(this.scaleFlag)start[1] = bestGammaEst[3]; //y axis scaling factor
|
---|
10369 |
|
---|
10370 | step[0] = 0.1D*start[0];
|
---|
10371 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10372 |
|
---|
10373 | // Add constraints
|
---|
10374 | this.addConstraint(0,-1,0.0D);
|
---|
10375 |
|
---|
10376 | // Set function variables
|
---|
10377 | ef.scaleOption = this.scaleFlag;
|
---|
10378 | ef.scaleFactor = this.yScaleFactor;
|
---|
10379 | ef.kay = Math.round(kayFinal);
|
---|
10380 | this.kayValue = Math.round(kayFinal);
|
---|
10381 |
|
---|
10382 | // Perform penultimate regression
|
---|
10383 | Object regFun4 = (Object)ef;
|
---|
10384 |
|
---|
10385 | this.nelderMead(regFun4, start, step, this.fTol, this.nMax);
|
---|
10386 | double[] coeff = getCoeff();
|
---|
10387 |
|
---|
10388 | // Final fit
|
---|
10389 |
|
---|
10390 | // initial estimates
|
---|
10391 | start[0] = coeff[0]; // lambda
|
---|
10392 | if(this.scaleFlag)start[1] = coeff[1]; //y axis scaling factor
|
---|
10393 |
|
---|
10394 | step[0] = 0.1D*start[0];
|
---|
10395 | if(this.scaleFlag)step[1] = 0.1D*start[1];
|
---|
10396 |
|
---|
10397 | // Add constraints
|
---|
10398 | this.addConstraint(0,-1,0.0D);
|
---|
10399 |
|
---|
10400 | // Set function variables
|
---|
10401 | ef.scaleOption = this.scaleFlag;
|
---|
10402 | ef.scaleFactor = this.yScaleFactor;
|
---|
10403 | ef.kay = Math.round(kayFinal);
|
---|
10404 | this.kayValue = Math.round(kayFinal);
|
---|
10405 |
|
---|
10406 | // Perform final regression
|
---|
10407 | Object regFun5 = (Object)ef;
|
---|
10408 |
|
---|
10409 | this.nelderMead(regFun5, start, step, this.fTol, this.nMax);
|
---|
10410 |
|
---|
10411 | if(allTest==1){
|
---|
10412 | // Print results
|
---|
10413 | if(!this.supressPrint)this.print();
|
---|
10414 |
|
---|
10415 | // Plot results
|
---|
10416 | int flag = this.plotXY(ef);
|
---|
10417 | if(flag!=-2 && !this.supressYYplot)this.plotYY();
|
---|
10418 | }
|
---|
10419 |
|
---|
10420 | if(yFlag){
|
---|
10421 | // restore data
|
---|
10422 | for(int i=0; i<this.nData-1; i++){
|
---|
10423 | this.yData[i]=-this.yData[i];
|
---|
10424 | }
|
---|
10425 | }
|
---|
10426 | }
|
---|
10427 |
|
---|
10428 | // return Erlang rate parameter (k) value
|
---|
10429 | public double getKayValue(){
|
---|
10430 | return this.kayValue;
|
---|
10431 | }
|
---|
10432 |
|
---|
10433 |
|
---|
10434 | // HISTOGRAM METHODS
|
---|
10435 | // Distribute data into bins to obtain histogram
|
---|
10436 | // zero bin position and upper limit provided
|
---|
10437 | public static double[][] histogramBins(double[] data, double binWidth, double binZero, double binUpper){
|
---|
10438 | int n = 0; // new array length
|
---|
10439 | int m = data.length; // old array length;
|
---|
10440 | for(int i=0; i<m; i++)if(data[i]<=binUpper)n++;
|
---|
10441 | if(n!=m){
|
---|
10442 | double[] newData = new double[n];
|
---|
10443 | int j = 0;
|
---|
10444 | for(int i=0; i<m; i++){
|
---|
10445 | if(data[i]<=binUpper){
|
---|
10446 | newData[j] = data[i];
|
---|
10447 | j++;
|
---|
10448 | }
|
---|
10449 | }
|
---|
10450 | System.out.println((m-n)+" data points, above histogram upper limit, excluded in histogramBins");
|
---|
10451 | return histogramBins(newData, binWidth, binZero);
|
---|
10452 | }
|
---|
10453 | else{
|
---|
10454 | return histogramBins(data, binWidth, binZero);
|
---|
10455 |
|
---|
10456 | }
|
---|
10457 | }
|
---|
10458 |
|
---|
10459 | // Distribute data into bins to obtain histogram
|
---|
10460 | // zero bin position provided
|
---|
// Distribute data into bins to obtain a histogram; the zero-bin position is provided.
// Returns binFreq[0][i] = centre of bin i, binFreq[1][i] = count of points in bin i.
// Points that fall outside [binZero, last wall*(1+histTol)] are reported on stdout and excluded.
public static double[][] histogramBins(double[] data, double binWidth, double binZero){
    double dmax = Fmath.maximum(data);
    // number of bins needed to span from binZero up to the data maximum
    int nBins = (int) Math.ceil((dmax - binZero)/binWidth);
    // add a spare bin when the walls overshoot dmax (non-integral span)
    if(binZero+nBins*binWidth>dmax)nBins++;
    int nPoints = data.length;
    // dataCheck[i] is set to 1 once point i has been placed in a bin
    int[] dataCheck = new int[nPoints];
    for(int i=0; i<nPoints; i++)dataCheck[i]=0;
    // bin walls: nBins+1 equally spaced boundaries starting at binZero
    double[]binWall = new double[nBins+1];
    binWall[0]=binZero;
    for(int i=1; i<=nBins; i++){
        binWall[i] = binWall[i-1] + binWidth;
    }
    // row 0 = bin centres, row 1 = frequencies (initialised to zero)
    double[][] binFreq = new double[2][nBins];
    for(int i=0; i<nBins; i++){
        binFreq[0][i]= (binWall[i]+binWall[i+1])/2.0D;
        binFreq[1][i]= 0.0D;
    }
    boolean test = true;

    // place each point: scan bins left to right until the point fits or bins run out
    for(int i=0; i<nPoints; i++){
        test=true;
        int j=0;
        while(test){
            if(j==nBins-1){
                // last bin: upper wall is inclusive, widened by histTol to absorb
                // floating-point round-off at the top edge
                if(data[i]>=binWall[j] && data[i]<=binWall[j+1]*(1.0D + Regression.histTol)){
                    binFreq[1][j]+= 1.0D;
                    dataCheck[i]=1;
                    test=false;
                }
            }
            else{
                // interior bins: half-open interval [lower wall, upper wall)
                if(data[i]>=binWall[j] && data[i]<binWall[j+1]){
                    binFreq[1][j]+= 1.0D;
                    dataCheck[i]=1;
                    test=false;
                }
            }
            if(test){
                if(j==nBins-1){
                    // no bin accepted this point - give up on it
                    test=false;
                }
                else{
                    j++;
                }
            }
        }
    }
    // report any points that could not be placed in a bin
    int nMissed=0;
    for(int i=0; i<nPoints; i++)if(dataCheck[i]==0){
        nMissed++;
        System.out.println("p " + i + " " + data[i] + " " + binWall[0] + " " + binWall[nBins]);
    }
    if(nMissed>0)System.out.println(nMissed+" data points, outside histogram limits, excluded in histogramBins");
    return binFreq;
}
|
---|
10516 |
|
---|
10517 | // Distribute data into bins to obtain histogram
|
---|
10518 | // zero bin position calculated
|
---|
// Distribute data into bins to obtain a histogram; the zero-bin position is
// calculated so the bins are centred on the data span [dmin, dmax].
// Delegates to histogramBins(data, binWidth, binZero) once binZero (and, if
// necessary, binWidth) have been fixed.
public static double[][] histogramBins(double[] data, double binWidth){

    double dmin = Fmath.minimum(data);
    double dmax = Fmath.maximum(data);
    double span = dmax - dmin;
    double binZero = dmin;
    int nBins = (int) Math.ceil(span/binWidth);
    // total width covered by nBins bins of the requested width
    double histoSpan = ((double)nBins)*binWidth;
    double rem = histoSpan - span;
    if(rem>=0){
        // bins overshoot the data span: centre them by shifting binZero down
        binZero -= rem/2.0D;
    }
    else{
        // bins undershoot (possible only through round-off); if the shortfall
        // exceeds the tolerance, grow binWidth in small increments until the
        // bins cover the whole span (capped at 1000 attempts)
        if(Math.abs(rem)/span>Regression.histTol){
            // readjust binWidth
            boolean testBw = true;
            double incr = Regression.histTol/nBins;
            int iTest = 0;
            while(testBw){
                binWidth += incr;
                histoSpan = ((double)nBins)*binWidth;
                rem = histoSpan - span;
                if(rem<0){
                    iTest++;
                    if(iTest>1000){
                        // give up after 1000 increments rather than loop forever
                        testBw = false;
                        System.out.println("histogram method could not encompass all data within histogram\nContact Michael thomas Flanagan");
                    }
                }
                else{
                    testBw = false;
                }
            }
        }
    }

    return Regression.histogramBins(data, binWidth, binZero);
}
|
---|
10557 |
|
---|
10558 | }
|
---|
10559 |
|
---|
10560 | // CLASSES TO EVALUATE THE SPECIAL FUNCTIONS
|
---|
10561 |
|
---|
10562 | // Class to evaluate the Gaussian (normal) function y = (yscale/sd.sqrt(2.pi)).exp(-0.5[(x - xmean)/sd]^2).
|
---|
10563 | class GaussianFunction implements RegressionFunction{
|
---|
10564 | public boolean scaleOption = true;
|
---|
10565 | public double scaleFactor = 1.0D;
|
---|
10566 | public double function(double[] p, double[] x){
|
---|
10567 | double yScale = scaleFactor;
|
---|
10568 | if(scaleOption)yScale = p[2];
|
---|
10569 | double y = (yScale/(p[1]*Math.sqrt(2.0D*Math.PI)))*Math.exp(-0.5D*Fmath.square((x[0]-p[0])/p[1]));
|
---|
10570 | return y;
|
---|
10571 | }
|
---|
10572 | }
|
---|
10573 |
|
---|
10574 | // Class to evaluate the Gaussian (normal) function y = (yscale/sd.sqrt(2.pi)).exp(-0.5[(x - xmean)/sd]^2).
|
---|
10575 | // Some parameters may be fixed
|
---|
10576 | class GaussianFunctionFixed implements RegressionFunction{
|
---|
10577 |
|
---|
10578 | public double[] param = new double[3];
|
---|
10579 | public boolean[] fixed = new boolean[3];
|
---|
10580 |
|
---|
10581 | public double function(double[] p, double[] x){
|
---|
10582 |
|
---|
10583 | int ii = 0;
|
---|
10584 | for(int i=0; i<3; i++){
|
---|
10585 | if(!fixed[i]){
|
---|
10586 | param[i] = p[ii];
|
---|
10587 | ii++;
|
---|
10588 | }
|
---|
10589 | }
|
---|
10590 |
|
---|
10591 | double y = (param[2]/(param[1]*Math.sqrt(2.0D*Math.PI)))*Math.exp(-0.5D*Fmath.square((x[0]-param[0])/param[1]));
|
---|
10592 | return y;
|
---|
10593 | }
|
---|
10594 | }
|
---|
10595 |
|
---|
10596 | // Class to evaluate the multiple Gaussian (normal) function y = Sum[(A(i)/sd(i).sqrt(2.pi)).exp(-0.5[(x - xmean(i))/sd(i)]^2)].
|
---|
10597 | class MultipleGaussianFunction implements RegressionFunction{
|
---|
10598 | public boolean scaleOption = true;
|
---|
10599 | public double scaleFactor = 1.0D;
|
---|
10600 | public int nGaussians = 1;
|
---|
10601 |
|
---|
10602 | public double function(double[] p, double[] x){
|
---|
10603 | double y = 0.0;
|
---|
10604 | int counter = 0;
|
---|
10605 | for(int i=0; i<nGaussians; i++){
|
---|
10606 | y += (p[counter+2]/(p[counter+1]*Math.sqrt(2.0D*Math.PI)))*Math.exp(-0.5D*Fmath.square((x[0]-p[counter])/p[counter+1]));
|
---|
10607 | counter += 3;
|
---|
10608 | }
|
---|
10609 | return y;
|
---|
10610 | }
|
---|
10611 | }
|
---|
10612 |
|
---|
10613 |
|
---|
10614 | // Class to evaluate the two parameter log-normal function y = (yscale/x.sigma.sqrt(2.pi)).exp(-0.5[(log(x) - mu)/sd]^2).
|
---|
10615 | class LogNormalTwoParFunction implements RegressionFunction{
|
---|
10616 | public boolean scaleOption = true;
|
---|
10617 | public double scaleFactor = 1.0D;
|
---|
10618 | public double function(double[] p, double[] x){
|
---|
10619 | double yScale = scaleFactor;
|
---|
10620 | if(scaleOption)yScale = p[2];
|
---|
10621 | double y = (yScale/(x[0]*p[1]*Math.sqrt(2.0D*Math.PI)))*Math.exp(-0.5D*Fmath.square((Math.log(x[0])-p[0])/p[1]));
|
---|
10622 | return y;
|
---|
10623 | }
|
---|
10624 | }
|
---|
10625 |
|
---|
10626 | // Class to evaluate the three parameter log-normal function y = (yscale/(x-alpha).beta.sqrt(2.pi)).exp(-0.5[(log((x-alpha)/gamma)/sd]^2).
|
---|
10627 | class LogNormalThreeParFunction implements RegressionFunction{
|
---|
10628 | public boolean scaleOption = true;
|
---|
10629 | public double scaleFactor = 1.0D;
|
---|
10630 | public double function(double[] p, double[] x){
|
---|
10631 | double yScale = scaleFactor;
|
---|
10632 | if(scaleOption)yScale = p[3];
|
---|
10633 | double y = (yScale/((x[0]-p[0])*p[1]*Math.sqrt(2.0D*Math.PI)))*Math.exp(-0.5D*Fmath.square(Math.log((x[0]-p[0])/p[2])/p[1]));
|
---|
10634 | return y;
|
---|
10635 | }
|
---|
10636 | }
|
---|
10637 |
|
---|
10638 |
|
---|
10639 | // Class to evaluate the Lorentzian function
|
---|
10640 | // y = (yscale/pi).(gamma/2)/((x - mu)^2+(gamma/2)^2).
|
---|
10641 | class LorentzianFunction implements RegressionFunction{
|
---|
10642 | public boolean scaleOption = true;
|
---|
10643 | public double scaleFactor = 1.0D;
|
---|
10644 |
|
---|
10645 | public double function(double[] p, double[] x){
|
---|
10646 | double yScale = scaleFactor;
|
---|
10647 | if(scaleOption)yScale = p[2];
|
---|
10648 | double y = (yScale/Math.PI)*(p[1]/2.0D)/(Fmath.square(x[0]-p[0])+Fmath.square(p[1]/2.0D));
|
---|
10649 | return y;
|
---|
10650 | }
|
---|
10651 | }
|
---|
10652 |
|
---|
10653 | // Class to evaluate the Poisson function
|
---|
10654 | // y = yscale.(mu^k).exp(-mu)/k!.
|
---|
10655 | class PoissonFunction implements RegressionFunction{
|
---|
10656 | public boolean scaleOption = true;
|
---|
10657 | public double scaleFactor = 1.0D;
|
---|
10658 |
|
---|
10659 | public double function(double[] p, double[] x){
|
---|
10660 | double yScale = scaleFactor;
|
---|
10661 | if(scaleOption)yScale = p[1];
|
---|
10662 | double y = yScale*Math.pow(p[0],x[0])*Math.exp(-p[0])/Stat.factorial(x[0]);
|
---|
10663 | return y;
|
---|
10664 | }
|
---|
10665 | }
|
---|
10666 |
|
---|
10667 | // Class to evaluate the Gumbel function
|
---|
10668 | class GumbelFunction implements RegressionFunction{
|
---|
10669 | public boolean scaleOption = true;
|
---|
10670 | public double scaleFactor = 1.0D;
|
---|
10671 | public int typeFlag = 0; // set to 0 -> Minimum Mode Gumbel
|
---|
10672 | // reset to 1 -> Maximum Mode Gumbel
|
---|
10673 | // reset to 2 -> one parameter Minimum Mode Gumbel
|
---|
10674 | // reset to 3 -> one parameter Maximum Mode Gumbel
|
---|
10675 | // reset to 4 -> standard Minimum Mode Gumbel
|
---|
10676 | // reset to 5 -> standard Maximum Mode Gumbel
|
---|
10677 |
|
---|
10678 | public double function(double[] p, double[] x){
|
---|
10679 | double y=0.0D;
|
---|
10680 | double arg=0.0D;
|
---|
10681 | double yScale = scaleFactor;
|
---|
10682 |
|
---|
10683 | switch(this.typeFlag){
|
---|
10684 | case 0:
|
---|
10685 | // y = yscale*(1/gamma)*exp((x-mu)/gamma)*exp(-exp((x-mu)/gamma))
|
---|
10686 | arg = (x[0]-p[0])/p[1];
|
---|
10687 | if(scaleOption)yScale = p[2];
|
---|
10688 | y = (yScale/p[1])*Math.exp(arg)*Math.exp(-(Math.exp(arg)));
|
---|
10689 | break;
|
---|
10690 | case 1:
|
---|
10691 | // y = yscale*(1/gamma)*exp((mu-x)/gamma)*exp(-exp((mu-x)/gamma))
|
---|
10692 | arg = (p[0]-x[0])/p[1];
|
---|
10693 | if(scaleOption)yScale = p[2];
|
---|
10694 | y = (yScale/p[1])*Math.exp(arg)*Math.exp(-(Math.exp(arg)));
|
---|
10695 | break;
|
---|
10696 | case 2:
|
---|
10697 | // y = yscale*(1/gamma)*exp((x)/gamma)*exp(-exp((x)/gamma))
|
---|
10698 | arg = x[0]/p[0];
|
---|
10699 | if(scaleOption)yScale = p[1];
|
---|
10700 | y = (yScale/p[0])*Math.exp(arg)*Math.exp(-(Math.exp(arg)));
|
---|
10701 | break;
|
---|
10702 | case 3:
|
---|
10703 | // y = yscale*(1/gamma)*exp((-x)/gamma)*exp(-exp((-x)/gamma))
|
---|
10704 | arg = -x[0]/p[0];
|
---|
10705 | if(scaleOption)yScale = p[1];
|
---|
10706 | y = (yScale/p[0])*Math.exp(arg)*Math.exp(-(Math.exp(arg)));
|
---|
10707 | break;
|
---|
10708 | case 4:
|
---|
10709 | // y = yscale*exp(x)*exp(-exp(x))
|
---|
10710 | if(scaleOption)yScale = p[0];
|
---|
10711 | y = yScale*Math.exp(x[0])*Math.exp(-(Math.exp(x[0])));
|
---|
10712 | break;
|
---|
10713 | case 5:
|
---|
10714 | // y = yscale*exp(-x)*exp(-exp(-x))
|
---|
10715 | if(scaleOption)yScale = p[0];
|
---|
10716 | y = yScale*Math.exp(-x[0])*Math.exp(-(Math.exp(-x[0])));
|
---|
10717 | break;
|
---|
10718 | }
|
---|
10719 | return y;
|
---|
10720 | }
|
---|
10721 | }
|
---|
10722 |
|
---|
10723 | // Class to evaluate the Frechet function
|
---|
10724 | // y = yscale.(gamma/sigma)*((x - mu)/sigma)^(-gamma-1)*exp(-((x-mu)/sigma)^-gamma
|
---|
10725 | class FrechetFunctionOne implements RegressionFunction{
|
---|
10726 | public boolean scaleOption = true;
|
---|
10727 | public double scaleFactor = 1.0D;
|
---|
10728 | public int typeFlag = 0; // set to 0 -> Three Parameter Frechet
|
---|
10729 | // reset to 1 -> Two Parameter Frechet
|
---|
10730 | // reset to 2 -> Standard Frechet
|
---|
10731 |
|
---|
10732 | public double function(double[] p, double[] x){
|
---|
10733 | double y = 0.0D;
|
---|
10734 | boolean test = false;
|
---|
10735 | double yScale = scaleFactor;
|
---|
10736 |
|
---|
10737 | switch(typeFlag){
|
---|
10738 | case 0: if(x[0]>=p[0]){
|
---|
10739 | double arg = (x[0] - p[0])/p[1];
|
---|
10740 | if(scaleOption)yScale = p[3];
|
---|
10741 | y = yScale*(p[2]/p[1])*Math.pow(arg,-p[2]-1.0D)*Math.exp(-Math.pow(arg,-p[2]));
|
---|
10742 | }
|
---|
10743 | break;
|
---|
10744 | case 1: if(x[0]>=0.0D){
|
---|
10745 | double arg = x[0]/p[0];
|
---|
10746 | if(scaleOption)yScale = p[2];
|
---|
10747 | y = yScale*(p[1]/p[0])*Math.pow(arg,-p[1]-1.0D)*Math.exp(-Math.pow(arg,-p[1]));
|
---|
10748 | }
|
---|
10749 | break;
|
---|
10750 | case 2: if(x[0]>=0.0D){
|
---|
10751 | double arg = x[0];
|
---|
10752 | if(scaleOption)yScale = p[1];
|
---|
10753 | y = yScale*p[0]*Math.pow(arg,-p[0]-1.0D)*Math.exp(-Math.pow(arg,-p[0]));
|
---|
10754 | }
|
---|
10755 | break;
|
---|
10756 | }
|
---|
10757 | return y;
|
---|
10758 | }
|
---|
10759 | }
|
---|
10760 |
|
---|
10761 | // Class to evaluate the semi-linearised Frechet function
|
---|
10762 | // log(log(1/(1-Cumulative y) = gamma*log((x-mu)/sigma)
|
---|
10763 | class FrechetFunctionTwo implements RegressionFunction{
|
---|
10764 |
|
---|
10765 | public int typeFlag = 0; // set to 0 -> Three Parameter Frechet
|
---|
10766 | // reset to 1 -> Two Parameter Frechet
|
---|
10767 | // reset to 2 -> Standard Frechet
|
---|
10768 |
|
---|
10769 | public double function(double[] p, double[] x){
|
---|
10770 | double y=0.0D;
|
---|
10771 | switch(typeFlag){
|
---|
10772 | case 0: y = -p[2]*Math.log(Math.abs(x[0]-p[0])/p[1]);
|
---|
10773 | break;
|
---|
10774 | case 1: y = -p[1]*Math.log(Math.abs(x[0])/p[0]);
|
---|
10775 | break;
|
---|
10776 | case 2: y = -p[0]*Math.log(Math.abs(x[0]));
|
---|
10777 | break;
|
---|
10778 | }
|
---|
10779 |
|
---|
10780 | return y;
|
---|
10781 | }
|
---|
10782 | }
|
---|
10783 |
|
---|
10784 | // Class to evaluate the Weibull function
|
---|
10785 | // y = yscale.(gamma/sigma)*((x - mu)/sigma)^(gamma-1)*exp(-((x-mu)/sigma)^gamma
|
---|
10786 | class WeibullFunctionOne implements RegressionFunction{
|
---|
10787 | public boolean scaleOption = true;
|
---|
10788 | public double scaleFactor = 1.0D;
|
---|
10789 | public int typeFlag = 0; // set to 0 -> Three Parameter Weibull
|
---|
10790 | // reset to 1 -> Two Parameter Weibull
|
---|
10791 | // reset to 2 -> Standard Weibull
|
---|
10792 |
|
---|
10793 | public double function(double[] p, double[] x){
|
---|
10794 | double y = 0.0D;
|
---|
10795 | boolean test = false;
|
---|
10796 | double yScale = scaleFactor;
|
---|
10797 |
|
---|
10798 | switch(typeFlag){
|
---|
10799 | case 0: if(x[0]>=p[0]){
|
---|
10800 | double arg = (x[0] - p[0])/p[1];
|
---|
10801 | if(scaleOption)yScale = p[3];
|
---|
10802 | y = yScale*(p[2]/p[1])*Math.pow(arg,p[2]-1.0D)*Math.exp(-Math.pow(arg,p[2]));
|
---|
10803 | }
|
---|
10804 | break;
|
---|
10805 | case 1: if(x[0]>=0.0D){
|
---|
10806 | double arg = x[0]/p[0];
|
---|
10807 | if(scaleOption)yScale = p[2];
|
---|
10808 | y = yScale*(p[1]/p[0])*Math.pow(arg,p[1]-1.0D)*Math.exp(-Math.pow(arg,p[1]));
|
---|
10809 | }
|
---|
10810 | break;
|
---|
10811 | case 2: if(x[0]>=0.0D){
|
---|
10812 | double arg = x[0];
|
---|
10813 | if(scaleOption)yScale = p[1];
|
---|
10814 | y = yScale*p[0]*Math.pow(arg,p[0]-1.0D)*Math.exp(-Math.pow(arg,p[0]));
|
---|
10815 | }
|
---|
10816 | break;
|
---|
10817 | }
|
---|
10818 | return y;
|
---|
10819 | }
|
---|
10820 | }
|
---|
10821 |
|
---|
10822 | // Class to evaluate the semi-linearised Weibull function
|
---|
10823 | // log(log(1/(1-Cumulative y) = gamma*log((x-mu)/sigma)
|
---|
10824 | class WeibullFunctionTwo implements RegressionFunction{
|
---|
10825 |
|
---|
10826 | public int typeFlag = 0; // set to 0 -> Three Parameter Weibull
|
---|
10827 | // reset to 1 -> Two Parameter Weibull
|
---|
10828 | // reset to 2 -> Standard Weibull
|
---|
10829 |
|
---|
10830 | public double function(double[] p, double[] x){
|
---|
10831 | double y=0.0D;
|
---|
10832 | switch(typeFlag){
|
---|
10833 | case 0: y = p[2]*Math.log(Math.abs(x[0]-p[0])/p[1]);
|
---|
10834 | break;
|
---|
10835 | case 1: y = p[1]*Math.log(Math.abs(x[0])/p[0]);
|
---|
10836 | break;
|
---|
10837 | case 2: y = p[0]*Math.log(Math.abs(x[0]));
|
---|
10838 | break;
|
---|
10839 | }
|
---|
10840 |
|
---|
10841 | return y;
|
---|
10842 | }
|
---|
10843 | }
|
---|
10844 |
|
---|
10845 | // Class to evaluate the Rayleigh function
|
---|
10846 | // y = (yscale/sigma)*(x/sigma)*exp(-0.5((x-mu)/sigma)^2
|
---|
10847 | class RayleighFunctionOne implements RegressionFunction{
|
---|
10848 | public boolean scaleOption = true;
|
---|
10849 | public double scaleFactor = 1.0D;
|
---|
10850 |
|
---|
10851 | public double function(double[] p, double[] x){
|
---|
10852 | double y = 0.0D;
|
---|
10853 | boolean test = false;
|
---|
10854 | double yScale = scaleFactor;
|
---|
10855 | if(scaleOption)yScale = p[1];
|
---|
10856 | if(x[0]>=0.0D){
|
---|
10857 | double arg = x[0]/p[0];
|
---|
10858 | y = (yScale/p[0])*arg*Math.exp(-0.5D*Math.pow(arg,2));
|
---|
10859 | }
|
---|
10860 | return y;
|
---|
10861 | }
|
---|
10862 | }
|
---|
10863 |
|
---|
10864 |
|
---|
10865 | // Class to evaluate the semi-linearised Rayleigh function
|
---|
10866 | // log(1/(1-Cumulative y) = 0.5*(x/sigma)^2
|
---|
10867 | class RayleighFunctionTwo implements RegressionFunction{
|
---|
10868 |
|
---|
10869 | public double function(double[] p, double[] x){
|
---|
10870 | double y = 0.5D*Math.pow(x[0]/p[0],2);
|
---|
10871 | return y;
|
---|
10872 | }
|
---|
10873 | }
|
---|
10874 |
|
---|
10875 | // class to evaluate a simple exponential function
|
---|
10876 | class ExponentialSimpleFunction implements RegressionFunction{
|
---|
10877 | public boolean scaleOption = true;
|
---|
10878 | public double scaleFactor = 1.0D;
|
---|
10879 |
|
---|
10880 | public double function(double[] p, double[] x){
|
---|
10881 | double yScale = scaleFactor;
|
---|
10882 | if(scaleOption)yScale = p[1];
|
---|
10883 | double y = yScale*Math.exp(p[0]*x[0]);
|
---|
10884 | return y;
|
---|
10885 | }
|
---|
10886 | }
|
---|
10887 |
|
---|
10888 | // class to evaluate multiple exponentials function
|
---|
// class to evaluate multiple exponentials function
// y = sum of p[i]*exp(p[i+1]*x[0]) over (amplitude, rate) pairs packed in p
class ExponentialMultipleFunction implements RegressionFunction{

    // NOTE(review): the loop below steps i by 2 while i < nExps, reading pairs
    // p[i], p[i+1]; so nExps presumably holds the total parameter count
    // (2 x number of exponential terms), not the number of terms - confirm
    // against the caller that sets this field.
    public int nExps = 0;

    public double function(double[] p, double[] x){
        double y = 0;
        for(int i=0; i<nExps; i+=2){
            // p[i] = amplitude, p[i+1] = exponent coefficient of this term
            y += p[i]*Math.exp(p[i+1]*x[0]);
        }
        return y;
    }
}
|
---|
10901 |
|
---|
10902 | // class to evaluate 1 - exponential function
|
---|
10903 | class OneMinusExponentialFunction implements RegressionFunction{
|
---|
10904 | public boolean scaleOption = true;
|
---|
10905 | public double scaleFactor = 1.0D;
|
---|
10906 |
|
---|
10907 | public double function(double[] p, double[] x){
|
---|
10908 | double yScale = scaleFactor;
|
---|
10909 | if(scaleOption)yScale = p[0];
|
---|
10910 | double y = yScale*(1 - Math.exp(p[1]*x[0]));
|
---|
10911 | return y;
|
---|
10912 | }
|
---|
10913 | }
|
---|
10914 |
|
---|
10915 | // class to evaluate a exponential distribution function
|
---|
10916 | class ExponentialFunction implements RegressionFunction{
|
---|
10917 | public boolean scaleOption = true;
|
---|
10918 | public double scaleFactor = 1.0D;
|
---|
10919 | public int typeFlag = 0; // set to 0 -> Two Parameter Exponential
|
---|
10920 | // reset to 1 -> One Parameter Exponential
|
---|
10921 | // reset to 2 -> Standard Exponential
|
---|
10922 |
|
---|
10923 | public double function(double[] p, double[] x){
|
---|
10924 | double y = 0.0D;
|
---|
10925 | boolean test = false;
|
---|
10926 | double yScale = scaleFactor;
|
---|
10927 |
|
---|
10928 | switch(typeFlag){
|
---|
10929 | case 0: if(x[0]>=p[0]){
|
---|
10930 | if(scaleOption)yScale = p[2];
|
---|
10931 | double arg = (x[0] - p[0])/p[1];
|
---|
10932 | y = (yScale/p[1])*Math.exp(-arg);
|
---|
10933 | }
|
---|
10934 | break;
|
---|
10935 | case 1: if(x[0]>=0.0D){
|
---|
10936 | double arg = x[0]/p[0];
|
---|
10937 | if(scaleOption)yScale = p[1];
|
---|
10938 | y = (yScale/p[0])*Math.exp(-arg);
|
---|
10939 | }
|
---|
10940 | break;
|
---|
10941 | case 2: if(x[0]>=0.0D){
|
---|
10942 | double arg = x[0];
|
---|
10943 | if(scaleOption)yScale = p[0];
|
---|
10944 | y = yScale*Math.exp(-arg);
|
---|
10945 | }
|
---|
10946 | break;
|
---|
10947 | }
|
---|
10948 | return y;
|
---|
10949 | }
|
---|
10950 | }
|
---|
10951 |
|
---|
10952 | // class to evaluate a Pareto scaled pdf
|
---|
10953 | class ParetoFunctionOne implements RegressionFunction{
|
---|
10954 | public boolean scaleOption = true;
|
---|
10955 | public double scaleFactor = 1.0D;
|
---|
10956 | public int typeFlag = 0; // set to 3 -> Shifted Pareto
|
---|
10957 | // set to 2 -> Two Parameter Pareto
|
---|
10958 | // set to 1 -> One Parameter Pareto
|
---|
10959 |
|
---|
10960 | public double function(double[] p, double[] x){
|
---|
10961 | double y = 0.0D;
|
---|
10962 | boolean test = false;
|
---|
10963 | double yScale = scaleFactor;
|
---|
10964 |
|
---|
10965 | switch(typeFlag){
|
---|
10966 | case 3: if(x[0]>=p[1]+p[2]){
|
---|
10967 | if(scaleOption)yScale = p[3];
|
---|
10968 | y = yScale*p[0]*Math.pow(p[1],p[0])/Math.pow((x[0]-p[2]),p[0]+1.0D);
|
---|
10969 | }
|
---|
10970 | break;
|
---|
10971 | case 2: if(x[0]>=p[1]){
|
---|
10972 | if(scaleOption)yScale = p[2];
|
---|
10973 | y = yScale*p[0]*Math.pow(p[1],p[0])/Math.pow(x[0],p[0]+1.0D);
|
---|
10974 | }
|
---|
10975 | break;
|
---|
10976 | case 1: if(x[0]>=1.0D){
|
---|
10977 | double arg = x[0]/p[0];
|
---|
10978 | if(scaleOption)yScale = p[1];
|
---|
10979 | y = yScale*p[0]/Math.pow(x[0],p[0]+1.0D);
|
---|
10980 | }
|
---|
10981 | break;
|
---|
10982 | }
|
---|
10983 | return y;
|
---|
10984 | }
|
---|
10985 | }
|
---|
10986 |
|
---|
10987 | // class to evaluate a Pareto cdf
|
---|
10988 | class ParetoFunctionTwo implements RegressionFunction{
|
---|
10989 |
|
---|
10990 | public int typeFlag = 0; // set to 3 -> Shifted Pareto
|
---|
10991 | // set to 2 -> Two Parameter Pareto
|
---|
10992 | // set to 1 -> One Parameter Pareto
|
---|
10993 |
|
---|
10994 | public double function(double[] p, double[] x){
|
---|
10995 | double y = 0.0D;
|
---|
10996 | switch(typeFlag){
|
---|
10997 | case 3: if(x[0]>=p[1]+p[2]){
|
---|
10998 | y = 1.0D - Math.pow(p[1]/(x[0]-p[2]),p[0]);
|
---|
10999 | }
|
---|
11000 | break;
|
---|
11001 | case 2: if(x[0]>=p[1]){
|
---|
11002 | y = 1.0D - Math.pow(p[1]/x[0],p[0]);
|
---|
11003 | }
|
---|
11004 | break;
|
---|
11005 | case 1: if(x[0]>=1.0D){
|
---|
11006 | y = 1.0D - Math.pow(1.0D/x[0],p[0]);
|
---|
11007 | }
|
---|
11008 | break;
|
---|
11009 | }
|
---|
11010 | return y;
|
---|
11011 | }
|
---|
11012 | }
|
---|
11013 |
|
---|
11014 | // class to evaluate a Sigmoidal threshold function
|
---|
11015 | class SigmoidThresholdFunction implements RegressionFunction{
|
---|
11016 | public boolean scaleOption = true;
|
---|
11017 | public double scaleFactor = 1.0D;
|
---|
11018 |
|
---|
11019 | public double function(double[] p, double[] x){
|
---|
11020 | double yScale = scaleFactor;
|
---|
11021 | if(scaleOption)yScale = p[2];
|
---|
11022 | double y = yScale/(1.0D + Math.exp(-p[0]*(x[0] - p[1])));
|
---|
11023 | return y;
|
---|
11024 | }
|
---|
11025 | }
|
---|
11026 |
|
---|
11027 | // class to evaluate a Rectangular Hyberbola
|
---|
11028 | class RectangularHyperbolaFunction implements RegressionFunction{
|
---|
11029 | public boolean scaleOption = true;
|
---|
11030 | public double scaleFactor = 1.0D;
|
---|
11031 |
|
---|
11032 | public double function(double[] p, double[] x){
|
---|
11033 | double yScale = scaleFactor;
|
---|
11034 | if(scaleOption)yScale = p[2];
|
---|
11035 | double y = yScale*x[0]/(p[0] + x[0]);
|
---|
11036 | return y;
|
---|
11037 | }
|
---|
11038 |
|
---|
11039 | }
|
---|
11040 |
|
---|
11041 | // class to evaluate a scaled Heaviside Step Function
|
---|
11042 | class StepFunctionFunction implements RegressionFunction{
|
---|
11043 | public boolean scaleOption = true;
|
---|
11044 | public double scaleFactor = 1.0D;
|
---|
11045 |
|
---|
11046 | public double function(double[] p, double[] x){
|
---|
11047 | double yScale = scaleFactor;
|
---|
11048 | if(scaleOption)yScale = p[1];
|
---|
11049 | double y = 0.0D;
|
---|
11050 | if(x[0]>p[0])y = yScale;
|
---|
11051 | return y;
|
---|
11052 | }
|
---|
11053 | }
|
---|
11054 |
|
---|
11055 | // class to evaluate a Hill or Sips sigmoidal function
|
---|
11056 | class SigmoidHillSipsFunction implements RegressionFunction{
|
---|
11057 | public boolean scaleOption = true;
|
---|
11058 | public double scaleFactor = 1.0D;
|
---|
11059 |
|
---|
11060 | public double function(double[] p, double[] x){
|
---|
11061 | double yScale = scaleFactor;
|
---|
11062 | if(scaleOption)yScale = p[2];
|
---|
11063 | double xterm = Math.pow(x[0],p[1]);
|
---|
11064 | double y = yScale*xterm/(Math.pow(p[0], p[1]) + xterm);
|
---|
11065 | return y;
|
---|
11066 | }
|
---|
11067 | }
|
---|
11068 |
|
---|
11069 | // Class to evaluate the Logistic probability function y = yscale*exp(-(x-mu)/beta)/(beta*(1 + exp(-(x-mu)/beta))^2.
|
---|
11070 | class LogisticFunction implements RegressionFunction{
|
---|
11071 | public boolean scaleOption = true;
|
---|
11072 | public double scaleFactor = 1.0D;
|
---|
11073 | public double function(double[] p, double[] x){
|
---|
11074 | double yScale = scaleFactor;
|
---|
11075 | if(scaleOption)yScale = p[2];
|
---|
11076 | double y = yScale*Fmath.square(Fmath.sech((x[0] - p[0])/(2.0D*p[1])))/(4.0D*p[1]);
|
---|
11077 | return y;
|
---|
11078 | }
|
---|
11079 | }
|
---|
11080 |
|
---|
11081 | // class to evaluate a Beta scaled pdf
|
---|
11082 | class BetaFunction implements RegressionFunction{
|
---|
11083 | public boolean scaleOption = true;
|
---|
11084 | public double scaleFactor = 1.0D;
|
---|
11085 | public int typeFlag = 0; // set to 0 -> Beta Distibution - [0, 1] interval
|
---|
11086 | // set to 1 -> Beta Distibution - [min, max] interval
|
---|
11087 |
|
---|
11088 | public double function(double[] p, double[] x){
|
---|
11089 | double y = 0.0D;
|
---|
11090 | boolean test = false;
|
---|
11091 | double yScale = scaleFactor;
|
---|
11092 |
|
---|
11093 | switch(typeFlag){
|
---|
11094 | case 0: if(scaleOption)yScale = p[2];
|
---|
11095 | y = yScale*Math.pow(x[0],p[0]-1.0D)*Math.pow(1.0D-x[0],p[1]-1.0D)/Stat.betaFunction(p[0],p[1]);
|
---|
11096 | break;
|
---|
11097 | case 1: if(scaleOption)yScale = p[4];
|
---|
11098 | y = yScale*Math.pow(x[0]-p[2],p[0]-1.0D)*Math.pow(p[3]-x[0],p[1]-1.0D)/Stat.betaFunction(p[0],p[1]);
|
---|
11099 | y = y/Math.pow(p[3]-p[2],p[0]+p[1]-1.0D);
|
---|
11100 | break;
|
---|
11101 | }
|
---|
11102 | return y;
|
---|
11103 | }
|
---|
11104 | }
|
---|
11105 |
|
---|
11106 | // class to evaluate a Gamma scaled pdf
|
---|
11107 | class GammaFunction implements RegressionFunction{
|
---|
11108 | public boolean scaleOption = true;
|
---|
11109 | public double scaleFactor = 1.0D;
|
---|
11110 | public int typeFlag = 0; // set to 0 -> Three parameter Gamma Distribution
|
---|
11111 | // set to 1 -> Standard Gamma Distribution
|
---|
11112 |
|
---|
11113 | public double function(double[] p, double[] x){
|
---|
11114 | double y = 0.0D;
|
---|
11115 | boolean test = false;
|
---|
11116 | double yScale = scaleFactor;
|
---|
11117 |
|
---|
11118 | switch(typeFlag){
|
---|
11119 | case 0: if(scaleOption)yScale = p[3];
|
---|
11120 | double xTerm = (x[0] - p[0])/p[1];
|
---|
11121 | y = yScale*Math.pow(xTerm,p[2]-1.0D)*Math.exp(-xTerm)/(p[1]*Stat.gammaFunction(p[2]));
|
---|
11122 | break;
|
---|
11123 | case 1: if(scaleOption)yScale = p[1];
|
---|
11124 | y = yScale*Math.pow(x[0],p[0]-1.0D)*Math.exp(-x[0])/Stat.gammaFunction(p[0]);
|
---|
11125 | break;
|
---|
11126 | }
|
---|
11127 | return y;
|
---|
11128 | }
|
---|
11129 | }
|
---|
11130 |
|
---|
11131 | // class to evaluate a Erlang scaled pdf
|
---|
11132 | // rate parameter is fixed
|
---|
11133 | class ErlangFunction implements RegressionFunction{
|
---|
11134 | public boolean scaleOption = true;
|
---|
11135 | public double scaleFactor = 1.0D;
|
---|
11136 | public double kay = 1.0D; // rate parameter
|
---|
11137 |
|
---|
11138 | public double function(double[] p, double[] x){
|
---|
11139 | boolean test = false;
|
---|
11140 | double yScale = scaleFactor;
|
---|
11141 |
|
---|
11142 | if(scaleOption)yScale = p[1];
|
---|
11143 |
|
---|
11144 | double y = kay*Math.log(p[0]) + (kay - 1)*Math.log(x[0]) - x[0]*p[0] - Fmath.logFactorial(kay - 1);
|
---|
11145 | y = yScale*Math.exp(y);
|
---|
11146 |
|
---|
11147 | return y;
|
---|
11148 | }
|
---|
11149 | }
|
---|
11150 |
|
---|
11151 | // class to evaluate a EC50 function
|
---|
11152 | class EC50Function implements RegressionFunction{
|
---|
11153 |
|
---|
11154 | public double function(double[] p, double[] x){
|
---|
11155 | double y = p[0] + (p[1] - p[0])/(1.0D + Math.pow(x[0]/p[2], p[3]));
|
---|
11156 | return y;
|
---|
11157 | }
|
---|
11158 | }
|
---|
11159 |
|
---|
11160 |
|
---|
11161 | // class to evaluate a Non-Integer Polynomial function
|
---|
11162 | class NonIntegerPolyFunction implements RegressionFunction{
|
---|
11163 |
|
---|
11164 | public double function(double[] p, double[] x){
|
---|
11165 | double y = p[0] + p[1]*x[0] + p[2]*Math.pow(x[0], p[3]);
|
---|
11166 | return y;
|
---|
11167 | }
|
---|
11168 | }
|
---|
11169 |
|
---|