
Commit 1de062e

ENH: Add variance targeting GARCH model

Add a VT GARCH model. Note that inference isn't correct.

1 parent a1a73a6 commit 1de062e

File tree

4 files changed: +165 −5


arch/bootstrap/_samplers.pyx

Lines changed: 1 addition & 1 deletion

@@ -2,8 +2,8 @@
 #cython: language_level=3, boundscheck=False, wraparound=False, cdivision=True
 
 import numpy as np
+
 cimport numpy as np
-cimport cython
 
 def stationary_bootstrap_sample(np.int64_t[:] indices,
                                 double[:] u,

arch/univariate/__init__.py

Lines changed: 2 additions & 2 deletions

@@ -3,12 +3,12 @@
 from arch.univariate.mean import HARX, ConstantMean, ZeroMean, ARX, arch_model, LS
 from arch.univariate.volatility import (GARCH, ARCH, HARCH, ConstantVariance, EWMAVariance,
                                         RiskMetrics2006, EGARCH, FixedVariance, MIDASHyperbolic,
-                                        FIGARCH)
+                                        FIGARCH, VarianceTargetingGARCH)
 from arch.univariate.distribution import (Distribution, Normal, StudentsT, SkewStudent,
                                           GeneralizedError)
 
 __all__ = ['HARX', 'ConstantMean', 'ZeroMean', 'ARX', 'arch_model', 'LS',
            'GARCH', 'ARCH', 'HARCH', 'ConstantVariance',
-           'EWMAVariance', 'RiskMetrics2006', 'EGARCH',
+           'EWMAVariance', 'RiskMetrics2006', 'EGARCH', 'VarianceTargetingGARCH',
            'Distribution', 'Normal', 'StudentsT', 'SkewStudent', 'GeneralizedError',
            'FixedVariance', 'MIDASHyperbolic', 'FIGARCH']
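
With the export in place, the new process attaches to a mean model like any other volatility process. A minimal sketch of the intended usage, assuming the usual arch workflow (the simulated returns are placeholder data, and the commit note above cautions that inference is not yet correct):

    import numpy as np
    from arch.univariate import ConstantMean, VarianceTargetingGARCH

    # Placeholder data; in practice use a series of (percentage) returns
    returns = np.random.RandomState(0).standard_normal(1000)

    am = ConstantMean(returns)
    am.volatility = VarianceTargetingGARCH(p=1, q=1)
    res = am.fit(disp='off')
    print(res.summary())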

arch/univariate/recursions.pyx

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@
 #cython: language_level=3, boundscheck=False, wraparound=False, cdivision=True
 
 import numpy as np
+
 cimport numpy as np
 
 __all__ = ['harch_recursion', 'arch_recursion', 'garch_recursion', 'egarch_recursion',

arch/univariate/volatility.py

Lines changed: 161 additions & 2 deletions

@@ -948,8 +948,7 @@ def starting_values(self, resids):
             if q > 0:
                 sv[1 + p + o:1 + p + o + q] = agb / q
             svs.append(sv)
-            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast,
-                                                   var_bounds)
+            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
         loc = np.argmax(llfs)
 
         return svs[int(loc)]
@@ -2702,3 +2701,163 @@ def _simulation_forecast(self, parameters, resids, backcast, var_bounds, start,
 
         forecasts[:start] = np.nan
         return VarianceForecast(forecasts, paths, shocks)
+
+
+class VarianceTargetingGARCH(GARCH):
+    r"""
+    Variance Targeting GARCH and related model estimation
+
+    The following models can be specified using VarianceTargetingGARCH:
+        * ARCH(p)
+        * GARCH(p,q)
+        * GJR-GARCH(p,o,q)
+
+    Parameters
+    ----------
+    p : int
+        Order of the symmetric innovation
+    o : int
+        Order of the asymmetric innovation
+    q : int
+        Order of the lagged (transformed) conditional variance
+
+    Attributes
+    ----------
+    num_params : int
+        The number of parameters in the model
+
+    Examples
+    --------
+    >>> from arch.univariate import VarianceTargetingGARCH
+
+    Standard GARCH(1,1) with targeting
+
+    >>> vt = VarianceTargetingGARCH(p=1, q=1)
+
+    Asymmetric GJR-GARCH process with targeting
+
+    >>> vt = VarianceTargetingGARCH(p=1, o=1, q=1)
+
+    Notes
+    -----
+    In this class of processes, the intercept is not estimated; it is
+    implied by the variance target :math:`\bar{\omega}`, the sample
+    variance of the residuals, so that the variance dynamics are
+
+    .. math::
+
+        \sigma_{t}^{2}=
+        \bar{\omega}\left(1-\sum_{i=1}^{p}\alpha_{i}
+        -\frac{1}{2}\sum_{j=1}^{o}\gamma_{j}
+        -\sum_{k=1}^{q}\beta_{k}\right)
+        +\sum_{i=1}^{p}\alpha_{i}\epsilon_{t-i}^{2}
+        +\sum_{j=1}^{o}\gamma_{j}\epsilon_{t-j}^{2}I\left[\epsilon_{t-j}<0\right]
+        +\sum_{k=1}^{q}\beta_{k}\sigma_{t-k}^{2}
+    """
+
+    def __init__(self, p=1, o=0, q=1):
+        super(VarianceTargetingGARCH, self).__init__()
+        self.p = int(p)
+        self.o = int(o)
+        self.q = int(q)
+        self.num_params = p + o + q
+        if p < 0 or o < 0 or q < 0:
+            raise ValueError('All lag lengths must be non-negative')
+        if p == 0 and o == 0:
+            raise ValueError('One of p or o must be strictly positive')
+        self.name = 'Variance Targeting ' + self._name()
+
+    def bounds(self, resids):
+        # Drop the bound on omega, which is not estimated
+        bounds = super(VarianceTargetingGARCH, self).bounds(resids)
+        return bounds[1:]
+
+    def constraints(self):
+        # Drop the constraint on omega, which is not estimated
+        a, b = super(VarianceTargetingGARCH, self).constraints()
+        a = a[1:, 1:]
+        b = b[1:]
+        return a, b
+
+    def compute_variance(self, parameters, resids, sigma2, backcast,
+                         var_bounds):
+
+        # Add the intercept implied by the variance target
+        target = (resids ** 2).mean()
+        abar = parameters[:self.p].sum()
+        gbar = parameters[self.p:self.p + self.o].sum()
+        bbar = parameters[self.p + self.o:].sum()
+        omega = target * (1 - abar - 0.5 * gbar - bbar)
+        omega = max(omega, np.finfo(np.double).eps)
+        parameters = np.r_[omega, parameters]
+
+        fresids = np.abs(resids) ** 2.0
+        sresids = np.sign(resids)
+
+        p, o, q = self.p, self.o, self.q
+        nobs = resids.shape[0]
+
+        garch_recursion(parameters, fresids, sresids, sigma2, p, o, q, nobs,
+                        backcast, var_bounds)
+        return sigma2
+
+    def simulate(self, parameters, nobs, rng, burn=500, initial_value=None):
+        if initial_value is None:
+            initial_value = parameters[0]
+
+        parameters = self._targeting_to_standard_garch(parameters)
+        return super(VarianceTargetingGARCH, self).simulate(parameters, nobs, rng, burn=burn,
+                                                            initial_value=initial_value)
+
+    def _targeting_to_standard_garch(self, parameters):
+        p, o = self.p, self.o
+        abar = parameters[:p].sum()
+        gbar = parameters[p:p + o].sum()
+        bbar = parameters[p + o:].sum()
+        const = parameters[0] * (1 - abar - 0.5 * gbar - bbar)
+        return np.r_[const, parameters]
+
+    def parameter_names(self):
+        return _common_names(self.p, self.o, self.q)[1:]
+
+    def _analytic_forecast(self, parameters, resids, backcast, var_bounds, start, horizon):
+        parameters = self._targeting_to_standard_garch(parameters)
+        return super(VarianceTargetingGARCH, self)._analytic_forecast(parameters, resids,
+                                                                      backcast, var_bounds,
+                                                                      start, horizon)
+
+    def _simulation_forecast(self, parameters, resids, backcast, var_bounds, start, horizon,
+                             simulations, rng):
+        parameters = self._targeting_to_standard_garch(parameters)
+        return super(VarianceTargetingGARCH, self)._simulation_forecast(parameters, resids,
+                                                                        backcast, var_bounds,
+                                                                        start, horizon,
+                                                                        simulations, rng)
+
+    def starting_values(self, resids):
+        p, o, q = self.p, self.o, self.q
+        alphas = [.01, .05, .1, .2]
+        gammas = alphas
+        abg = [.5, .7, .9, .98]
+        abgs = list(itertools.product(*[alphas, gammas, abg]))
+
+        svs = []
+        var_bounds = self.variance_bounds(resids)
+        backcast = self.backcast(resids)
+        llfs = np.zeros(len(abgs))
+        for i, values in enumerate(abgs):
+            alpha, gamma, agb = values
+            sv = np.ones(p + o + q)
+            if p > 0:
+                sv[:p] = alpha / p
+                agb -= alpha
+            if o > 0:
+                sv[p: p + o] = gamma / o
+                agb -= gamma / 2.0
+            if q > 0:
+                sv[p + o:] = agb / q
+            svs.append(sv)
+            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
+        loc = np.argmax(llfs)
+
+        return svs[int(loc)]
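
The core of the reparameterization in compute_variance: omega is not a free parameter but is implied by the sample variance of the residuals and the remaining coefficients, with the asymmetric terms weighted by one half since the indicator is active roughly half the time. A standalone sketch of that computation (the function name and example values are illustrative, not part of the library):

    import numpy as np

    def implied_omega(parameters, resids, p, o):
        # parameters: [alpha_1..alpha_p, gamma_1..gamma_o, beta_1..beta_q]
        target = (resids ** 2).mean()  # variance target
        abar = parameters[:p].sum()
        gbar = parameters[p:p + o].sum()
        bbar = parameters[p + o:].sum()
        omega = target * (1 - abar - 0.5 * gbar - bbar)
        # Floor at machine epsilon so the variance recursion stays positive
        return max(omega, np.finfo(np.double).eps)

    # GJR-GARCH(1,1,1)-style parameters: [alpha, gamma, beta]
    resids = np.random.RandomState(1).standard_normal(500)
    print(implied_omega(np.array([0.05, 0.10, 0.85]), resids, p=1, o=1))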
