# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.

FilterPy library.
http://github.com/rlabbe/filterpy

Documentation at:
https://filterpy.readthedocs.org

Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python

This is licensed under an MIT license. See the readme.MD file
for more information.
"""

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import numpy as np
from numpy import dot


class GHFilterOrder(object):
    """ A g-h filter of a specified order 0, 1, or 2.

    Strictly speaking, the g-h filter is order 1, and the 2nd order
    filter is called the g-h-k filter. I'm not aware of any filter name
    that encompasses orders 0, 1, and 2 under one name, or I would use it.
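
    **Example**

    A minimal usage sketch; the gain values below are illustrative, not
    tuned for any particular problem::

        from filterpy.gh import GHFilterOrder

        # track a constant-acceleration signal with an order 2 filter
        f = GHFilterOrder(x0=0., dt=1., order=2, g=.5, h=.3, k=.1)
        for z in [1., 2.1, 3.9, 6.2]:
            f.update(z)
        # f.x now holds the [value, 1st derivative, 2nd derivative] estimates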

    |
    |

    **Methods**
    """

    def __init__(self, x0, dt, order, g, h=None, k=None):
        """ Creates a g-h filter of order 0, 1, or 2.

        **Parameters**

        x0 : 1D np.array or scalar
            Initial value for the filter state. Each value can be a scalar
            or a np.array.

            You can use a scalar for x0. If order > 0, then 0.0 is assumed
            for the higher order terms.

            x[0] is the value being tracked
            x[1] is the first derivative (for order 1 and 2 filters)
            x[2] is the second derivative (for order 2 filters)

        dt : scalar
            timestep

        order : int
            order of the filter. Defines the order of the system
            0 - assumes system of form x = a_0 (constant)
            1 - assumes system of form x = a_0 + a_1*t
            2 - assumes system of form x = a_0 + a_1*t + a_2*t^2

        g : float
            filter g gain parameter.

        h : float, optional
            filter h gain parameter, order 1 and 2 only

        k : float, optional
            filter k gain parameter, order 2 only

        **Members**

        self.x : np.array
            State of the filter.
            x[0] is the value being tracked
            x[1] is the derivative of x[0] (order 1 and 2 only)
            x[2] is the 2nd derivative of x[0] (order 2 only)

            This is always an np.array, even for order 0 where you can
            initialize x0 with a scalar.

        self.residual : np.array
            difference between the measurement and the prediction
        """

        assert order >= 0
        assert order <= 2

        if np.isscalar(x0):
            self.x = np.zeros(order+1)
            self.x[0] = x0
        else:
            self.x = np.copy(x0.astype(float))

        self.dt = dt
        self.order = order

        self.g = g
        self.h = h
        self.k = k

    def update(self, z, g=None, h=None, k=None):
        """ update the filter with measurement z. z must be the same type
        or treatable as the same type as self.x[0].
        """

        if self.order == 0:
            if g is None:
                g = self.g
            self.residual = z - self.x[0]
            self.x += dot(g, self.residual)

        elif self.order == 1:
            if g is None:
                g = self.g
            if h is None:
                h = self.h
            x = self.x[0]
            dx = self.x[1]
            dxdt = dot(dx, self.dt)

            self.residual = z - (x + dxdt)
            self.x[0] = x + dxdt + g*self.residual
            self.x[1] = dx + h*self.residual / self.dt

        else:  # order == 2
            if g is None:
                g = self.g
            if h is None:
                h = self.h
            if k is None:
                k = self.k

            x = self.x[0]
            dx = self.x[1]
            ddx = self.x[2]
            dxdt = dot(dx, self.dt)
            T2 = self.dt**2.

            self.residual = z - (x + dxdt + 0.5*ddx*T2)

            self.x[0] = x + dxdt + 0.5*ddx*T2 + g*self.residual
            self.x[1] = dx + ddx*self.dt + h*self.residual / self.dt
            self.x[2] = ddx + 2*k*self.residual / (self.dt**2)


class GHFilter(object):
    """ Implements the g-h filter. The topic is too large to cover in
    this comment. See my book "Kalman and Bayesian Filters in Python" [1]
    or Eli Brookner's "Tracking and Kalman Filters Made Easy" [2].

    A few basic examples are below, and the tests in ./gh_tests.py may
    give you more ideas on use.

    **Examples**

    Create a basic filter for a scalar value with g=.8, h=.2.
    Initialize to 0, with a derivative (velocity) of 0.

    >>> from filterpy.gh import GHFilter
    >>> f = GHFilter(x=0., dx=0., dt=1., g=.8, h=.2)

    Incorporate the measurement of 1

    >>> f.update(z=1)
    (0.8, 0.2)

    Incorporate a measurement of 2 with g=1 and h=0.01

    >>> f.update(z=2, g=1, h=0.01)
    (2.0, 0.21000000000000002)

    Create a filter with two independent variables.

    >>> from numpy import array
    >>> f = GHFilter(x=array([0,1]), dx=array([0,0]), dt=1, g=.8, h=.02)

    and update with the measurements (2,4)

    >>> f.update(array([2,4]))
    (array([ 1.6,  3.4]), array([ 0.04,  0.06]))

    **References**

    [1] Labbe, "Kalman and Bayesian Filters in Python"
    http://rlabbe.github.io/Kalman-and-Bayesian-Filters-in-Python

    [2] Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
    Sons, 1998.

    |
    |

    **Methods**
    """

    def __init__(self, x, dx, dt, g, h):
        """ Creates a g-h filter.

        **Parameters**

        x : 1D np.array or scalar
            Initial value for the filter state. Each value can be a scalar
            or a np.array. Use an np.array to run several independent
            filters at once; dx must then have the same shape as x.

        dx : 1D np.array or scalar
            Initial value for the derivative of the filter state.

        dt : scalar
            time step

        g : float
            filter g gain parameter.

        h : float
            filter h gain parameter.
        """

        assert np.isscalar(dt)
        assert np.isscalar(g)
        assert np.isscalar(h)

        self.x = x
        self.dx = dx
        self.dt = dt
        self.g = g
        self.h = h

    def update(self, z, g=None, h=None):
        """performs the g-h filter predict and update step on the
        measurement z. Modifies the member variables listed below,
        and returns the state of x and dx as a tuple as a convenience.

        **Modified Members**

        x
            filtered state variable

        dx
            derivative (velocity) of x

        residual
            difference between the measurement and the prediction for x

        x_prediction
            predicted value of x before incorporating the measurement z.

        dx_prediction
            predicted value of the derivative of x before incorporating the
            measurement z.

        **Parameters**

        z : any
            the measurement
        g : scalar (optional)
            Override the fixed self.g value for this update
        h : scalar (optional)
            Override the fixed self.h value for this update

        **Returns**

        x filter output for x
        dx filter output for dx (derivative of x)
        """

        if g is None:
            g = self.g
        if h is None:
            h = self.h

        # prediction step
        self.dx_prediction = self.dx
        self.x_prediction = self.x + (self.dx*self.dt)

        # update step
        self.residual = z - self.x_prediction
        self.dx = self.dx_prediction + h * self.residual / self.dt
        self.x = self.x_prediction + g * self.residual

        return (self.x, self.dx)

    def batch_filter(self, data, save_predictions=False):
        """
        Given a sequence of data, performs the g-h filter
        with a fixed g and h. See update() if you need to vary g and/or h.

        Uses self.x and self.dx to initialize the filter, but DOES NOT
        alter self.x and self.dx during execution, allowing you to use this
        class multiple times without resetting self.x and self.dx. I'm not
        sure how often you would need to do that, but the capability is there.
        More exactly, none of the class member variables are modified
        by this function, in distinct contrast to update(), which changes
        most of them.

        **Parameters**

        data : list like
            contains the data to be filtered.

        save_predictions : boolean
            the predictions will be saved and returned if this is true

        **Returns**

        results : np.array shape (n+1, 2), where n=len(data)
            contains the results of the filter, where
            results[i,0] is x, and
            results[i,1] is dx (derivative of x)
            First entry is the initial values of x and dx as set by __init__.

        predictions : np.array shape(n), optional
            the predictions for each step in the filter. Only returned if
            save_predictions == True
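
        **Example**

        A minimal sketch; the measurement values are illustrative::

            from filterpy.gh import GHFilter

            f = GHFilter(x=0., dx=0., dt=1., g=.8, h=.2)
            results = f.batch_filter([1., 2., 3.])
            # results[0] holds the initial (x, dx); results[1:] hold the
            # filtered (x, dx) for each measurement in turn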
"""
330
331
x = self.x
332
dx = self.dx
333
n = len(data)
334
335
results = np.zeros((n+1, 2))
336
results[0,0] = x
337
results[0,1] = dx
338
339
if save_predictions:
340
predictions = np.zeros(n)
341
342
# optimization to avoid n computations of h / dt
343
h_dt = self.h / self.dt
344
345
for i,z in enumerate(data):
346
#prediction step
347
x_est = x + (dx*self.dt)
348
349
# update step
350
residual = z - x_est
351
dx = dx + h_dt * residual # i.e. dx = dx + h * residual / dt
352
x = x_est + self.g * residual
353
354
results[i+1,0] = x
355
results[i+1,1] = dx
356
if save_predictions:
357
predictions[i] = x_est
358
359
if save_predictions:
360
return results, predictions
361
else:
362
return results
363
364
365
    def VRF_prediction(self):
        """ Returns the Variance Reduction Factor of the prediction
        step of the filter. The VRF is the
        normalized variance for the filter, as given in the equation below.

        .. math::
            VRF(\hat{x}_{n+1,n}) = \\frac{VAR(\hat{x}_{n+1,n})}{\sigma^2_x}
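
        **Example**

        A minimal sketch; the gains are illustrative, not tuned::

            f = GHFilter(x=0., dx=0., dt=1., g=.5, h=.2)
            vrf = f.VRF_prediction()   # normalized prediction variance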

        **References**

        Asquith, "Weight Selection in First Order Linear Filters"
        Report No RG-TR-69-12, U.S. Army Missile Command. Redstone Arsenal, Al.
        November 24, 1970.
        """

        g = self.g
        h = self.h

        return (2*g**2 + 2*h + g*h) / (g*(4 - 2*g - h))

    def VRF(self):
        """ Returns the Variance Reduction Factor (VRF) of the state variable
        of the filter (x) and its derivative (dx). The VRF is the
        normalized variance for the filter, as given in the equations below.

        .. math::
            VRF(\hat{x}_{n,n}) = \\frac{VAR(\hat{x}_{n,n})}{\sigma^2_x}

            VRF(\hat{\dot{x}}_{n,n}) = \\frac{VAR(\hat{\dot{x}}_{n,n})}{\sigma^2_x}

        **Returns**

        vrf_x   VRF of x state variable
        vrf_dx  VRF of the dx state variable (derivative of x)
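
        **Example**

        A minimal sketch; the gains are illustrative, not tuned::

            f = GHFilter(x=0., dx=0., dt=1., g=.5, h=.2)
            vx, vdx = f.VRF()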
"""
403
404
g = self.g
405
h = self.h
406
407
den = g*(4 - 2*g - h)
408
409
vx = (2*g**2 + 2*h - 3*g*h) / den
410
vdx = 2*h**2 / (self.dt**2 * den)
411
412
return (vx, vdx)
413
414
415
class GHKFilter(object):
    """ Implements the g-h-k filter.
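
    **Example**

    A minimal usage sketch; the gain values below are illustrative, not
    tuned for any particular problem::

        from filterpy.gh import GHKFilter

        # track value, 1st, and 2nd derivative of a scalar signal
        f = GHKFilter(x=0., dx=0., ddx=0., dt=1., g=.5, h=.3, k=.1)
        for z in [1., 2.2, 4.1, 6.9]:
            f.update(z)
        # f.x, f.dx, f.ddx now hold the current estimates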

    **References**

    Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
    Sons, 1998.

    |
    |

    **Methods**
    """

    def __init__(self, x, dx, ddx, dt, g, h, k):
        """ Creates a g-h-k filter.

        **Parameters**

        x : 1D np.array or scalar
            Initial value for the filter state. Each value can be a scalar
            or a np.array. Use an np.array to run several independent
            filters at once; dx and ddx must then have the same shape as x.

        dx : 1D np.array or scalar
            Initial value for the derivative of the filter state.

        ddx : 1D np.array or scalar
            Initial value for the second derivative of the filter state.

        dt : scalar
            time step

        g : float
            filter g gain parameter.

        h : float
            filter h gain parameter.

        k : float
            filter k gain parameter.
        """

        assert np.isscalar(dt)
        assert np.isscalar(g)
        assert np.isscalar(h)
        assert np.isscalar(k)

        self.x = x
        self.dx = dx
        self.ddx = ddx
        self.dt = dt
        self.g = g
        self.h = h
        self.k = k

    def update(self, z, g=None, h=None, k=None):
        """performs the g-h-k filter predict and update step on the
        measurement z.

        On return, self.x, self.dx, self.residual, and self.x_prediction
        will have been updated with the results of the computation. For
        convenience, self.x and self.dx are returned in a tuple.

        **Parameters**

        z : scalar
            the measurement
        g : scalar (optional)
            Override the fixed self.g value for this update
        h : scalar (optional)
            Override the fixed self.h value for this update
        k : scalar (optional)
            Override the fixed self.k value for this update

        **Returns**

        x filter output for x
        dx filter output for dx (derivative of x)
        """

        if g is None:
            g = self.g
        if h is None:
            h = self.h
        if k is None:
            k = self.k

        dt = self.dt
        dt_sqr = dt**2

        # prediction step
        self.ddx_prediction = self.ddx
        self.dx_prediction = self.dx + self.ddx*dt
        self.x_prediction = self.x + self.dx*dt + .5*self.ddx*(dt_sqr)

        # update step
        self.residual = z - self.x_prediction

        self.ddx = self.ddx_prediction + 2*k*self.residual / dt_sqr
        self.dx = self.dx_prediction + h * self.residual / dt
        self.x = self.x_prediction + g * self.residual

        return (self.x, self.dx)

    def batch_filter(self, data, save_predictions=False):
        """
        Performs the g-h-k filter with a fixed g, h, and k.

        Uses self.x, self.dx, and self.ddx to initialize the filter, but
        DOES NOT alter them during execution, allowing you to use this
        class multiple times without resetting them. I'm not sure
        how often you would need to do that, but the capability is there.
        More exactly, none of the class member variables are modified
        by this function.

        **Parameters**

        data : list_like
            contains the data to be filtered.

        save_predictions : boolean
            The predictions will be saved and returned if this is true

        **Returns**

        results : np.array shape (n+1, 3), where n=len(data)
            contains the results of the filter, where
            results[i,0] is x,
            results[i,1] is dx (derivative of x), and
            results[i,2] is ddx (second derivative of x)
            First entry is the initial values of x, dx, and ddx as set
            by __init__.

        predictions : np.array shape(n), optional
            the predictions for each step in the filter. Only returned if
            save_predictions == True
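
        **Example**

        A minimal sketch; the measurement values are illustrative::

            from filterpy.gh import GHKFilter

            f = GHKFilter(x=0., dx=0., ddx=0., dt=1., g=.5, h=.3, k=.1)
            results = f.batch_filter([1., 2.2, 4.1, 6.9])
            # each row of results holds (x, dx, ddx)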
"""
558
559
x = self.x
560
dx = self.dx
561
n = len(data)
562
563
results = np.zeros((n+1, 2))
564
results[0,0] = x
565
results[0,1] = dx
566
567
if save_predictions:
568
predictions = np.zeros(n)
569
570
# optimization to avoid n computations of h / dt
571
h_dt = self.h / self.dt
572
573
for i,z in enumerate(data):
574
#prediction step
575
x_est = x + (dx*self.dt)
576
577
# update step
578
residual = z - x_est
579
dx = dx + h_dt * residual # i.e. dx = dx + h * residual / dt
580
x = x_est + self.g * residual
581
582
results[i+1,0] = x
583
results[i+1,1] = dx
584
if save_predictions:
585
predictions[i] = x_est
586
587
if save_predictions:
588
return results, predictions
589
else:
590
return results
591
592
593
    def VRF_prediction(self):
        """ Returns the Variance Reduction Factor for x of the prediction
        step of the filter.

        This implements the equation

        .. math::
            VRF(\hat{x}_{n+1,n}) = \\frac{VAR(\hat{x}_{n+1,n})}{\sigma^2_x}

        **References**

        Asquith and Woods, "Total Error Minimization in First
        and Second Order Prediction Filters" Report No RE-TR-70-17, U.S.
        Army Missile Command. Redstone Arsenal, Al. November 24, 1970.
        """

        g = self.g
        h = self.h
        k = self.k
        gh2 = 2*g + h
        return ((g*k*(gh2 - 4) + h*(g*gh2 + 2*h)) /
                (2*k - (g*(h+k)*(gh2 - 4))))

    def bias_error(self, dddx):
        """ Returns the bias error given the specified constant jerk (dddx)

        **Parameters**

        dddx : type(self.x)
            3rd derivative (jerk) of the state variable x.
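
        **Example**

        A minimal sketch; the gains and the jerk value are illustrative::

            f = GHKFilter(x=0., dx=0., ddx=0., dt=1., g=.5, h=.3, k=.1)
            b = f.bias_error(0.1)   # -dt**3 * dddx / (2*k) = -0.5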

        **References**

        Asquith and Woods, "Total Error Minimization in First
        and Second Order Prediction Filters" Report No RE-TR-70-17, U.S.
        Army Missile Command. Redstone Arsenal, Al. November 24, 1970.
        """
        return -self.dt**3 * dddx / (2*self.k)

    def VRF(self):
        """ Returns the Variance Reduction Factor (VRF) of the state variable
        of the filter (x) and its derivatives (dx, ddx). The VRF is the
        normalized variance for the filter, as given in the equations below.

        .. math::
            VRF(\hat{x}_{n,n}) = \\frac{VAR(\hat{x}_{n,n})}{\sigma^2_x}

            VRF(\hat{\dot{x}}_{n,n}) = \\frac{VAR(\hat{\dot{x}}_{n,n})}{\sigma^2_x}

            VRF(\hat{\ddot{x}}_{n,n}) = \\frac{VAR(\hat{\ddot{x}}_{n,n})}{\sigma^2_x}

        **Returns**

        vrf_x : type(x)
            VRF of x state variable

        vrf_dx : type(x)
            VRF of the dx state variable (derivative of x)

        vrf_ddx : type(x)
            VRF of the ddx state variable (second derivative of x)
        """

        g = self.g
        h = self.h
        k = self.k

        # common subexpressions in the equations pulled out for efficiency;
        # they don't 'mean' anything.
        hg4 = 4 - 2*g - h
        ghk = g*h + g*k - 2*k

        vx = (2*h*(2*(g**2) + 2*h - 3*g*h) - 2*g*k*hg4) / (2*k - g*(h+k) * hg4)
        vdx = (2*(h**3) - 4*(h**2)*k + 4*(k**2)*(2-g)) / (2*hg4*ghk)
        vddx = 8*h*(k**2) / ((self.dt**4)*hg4*ghk)

        return (vx, vdx, vddx)


def optimal_noise_smoothing(g):
    """ provides g,h,k parameters for optimal smoothing of noise for a given
    value of g. This is due to Polge and Bhagavan [1].

    **Parameters**

    g : float
        value of g for which we will optimize

    **Returns**

    (g,h,k) : (float, float, float)
        values for g,h,k that provide optimal smoothing of noise

    **Example**::

        from filterpy.gh import GHKFilter, optimal_noise_smoothing

        g,h,k = optimal_noise_smoothing(g)
        f = GHKFilter(0,0,0,1,g,h,k)
        f.update(1.)

    **References**

    [1] Polge and Bhagavan. "A Study of the g-h-k Tracking Filter".
    Report No. RE-CR-76-1. University of Alabama in Huntsville.
    July, 1975
    """

    h = ((2*g**3 - 4*g**2) + (4*g**6 - 64*g**5 + 64*g**4)**.5) / (8*(1-g))
    k = (h*(2-g) - g**2) / g

    return (g, h, k)


def least_squares_parameters(n):
    """ An order 1 least squares filter can be computed by a g-h filter
    by varying g and h over time according to the formulas below, where
    the first measurement is at n=0, the second is at n=1, and so on:

    .. math::

        h_n = \\frac{6}{(n+2)(n+1)}

        g_n = \\frac{2(2n+1)}{(n+2)(n+1)}

    **Parameters**

    n : int
        the nth measurement, starting at 0 (i.e. first measurement has n==0)

    **Returns**

    (g,h) : (float, float)
        g and h parameters for this time step for the least-squares filter

    **Example**::

        from filterpy.gh import GHFilter, least_squares_parameters

        lsf = GHFilter(0, 0, 1, 0, 0)
        z = 10
        for i in range(10):
            g,h = least_squares_parameters(i)
            lsf.update(z, g, h)
    """

    den = (n+2)*(n+1)

    g = (2*(2*n + 1)) / den
    h = 6 / den
    return (g, h)


def critical_damping_parameters(theta, order=2):
    """ Computes values for g and h (and k for the g-h-k filter) for a
    critically damped filter.

    The idea here is to create a filter that reduces the influence of
    old data as new data comes in. This allows the filter to track a
    moving target better. This goes by different names. It may be called the
    discounted least-squares g-h filter, a fading-memory polynomial filter
    of order 1, or a critically damped g-h filter.

    In a normal least-squares filter we compute the error for each point as

    .. math::

        \epsilon_t = (z-\\hat{x})^2

    For a critically damped filter we reduce the influence of each error by

    .. math::

        \\theta^{t-i}

    where

    .. math::

        0 \\le \\theta \\le 1

    In other words the last error is scaled by theta, the next to last by
    theta squared, the next by theta cubed, and so on.

    **Parameters**

    theta : float, 0 <= theta <= 1
        scaling factor for previous terms

    order : int, 2 (default) or 3
        order of filter to create the parameters for. g and h will be
        calculated for order 2, and g, h, and k for order 3.

    **Returns**

    g : scalar
        optimal value for g in the g-h or g-h-k filter

    h : scalar
        optimal value for h in the g-h or g-h-k filter

    k : scalar
        optimal value for k in the g-h-k filter (order 3 only)

    **Example**::

        from filterpy.gh import GHFilter, critical_damping_parameters

        g,h = critical_damping_parameters(0.3)
        critical_filter = GHFilter(0, 0, 1, g, h)

    **References**

    Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
    Sons, 1998.

    Polge and Bhagavan. "A Study of the g-h-k Tracking Filter".
    Report No. RE-CR-76-1. University of Alabama in Huntsville.
    July, 1975
    """
    assert theta >= 0
    assert theta <= 1

    if order == 2:
        return (1. - theta**2, (1. - theta)**2)

    if order == 3:
        return (1. - theta**3, 1.5*(1.-theta**2)*(1.-theta), .5*(1 - theta)**3)

    raise ValueError('bad order specified: {}'.format(order))


def benedict_bornder_constants(g, critical=False):
    """ Computes the g,h constants for a Benedict-Bordner filter, which
    minimizes transient errors for a g-h filter.

    Returns the values g,h for a specified g. Strictly speaking, only h
    is computed; g is returned unchanged.

    The default formula for the Benedict-Bordner allows ringing. We can
    "nearly" critically damp it; ringing will be reduced, but not entirely
    eliminated, at the cost of reduced performance.

    **Parameters**

    g : float
        scaling factor g for the filter

    critical : boolean, default False
        Attempts to critically damp the filter.

    **Returns**

    g : float
        scaling factor g (same as the g that was passed in)

    h : float
        scaling factor h that minimizes the transient errors

    **Example**::

        from filterpy.gh import GHFilter, benedict_bornder_constants

        g, h = benedict_bornder_constants(.855)
        f = GHFilter(0, 0, 1, g, h)

    **References**

    Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
    Sons, 1998.
    """

    g_sqr = g**2
    if critical:
        return (g, 0.8 * (2. - g_sqr - 2*(1-g_sqr)**.5) / g_sqr)
    else:
        return (g, g_sqr / (2.-g))