5 | 5 | """
6 | 6 | from __future__ import absolute_import, division
7 | 7 |
8 | | -from abc import abstractmethod
9 | 8 | import itertools
| 9 | +from abc import abstractmethod
10 | 10 | from warnings import warn
11 | 11 |
12 | 12 | import numpy as np
15 | 15 |
16 | 16 | from arch.compat.python import add_metaclass, range
17 | 17 | from arch.univariate.distribution import Normal
18 | | -from arch.utility.exceptions import initial_value_warning, InitialValueWarning
19 | 18 | from arch.utility.array import ensure1d, AbstractDocStringInheritor
| 19 | +from arch.utility.exceptions import initial_value_warning, InitialValueWarning
20 | 20 |
21 | 21 | try:
22 | 22 |     from arch.univariate.recursions import (garch_recursion, harch_recursion,
@@ -944,8 +944,7 @@ def starting_values(self, resids):
944 | 944 |             if q > 0:
945 | 945 |                 sv[1 + p + o:1 + p + o + q] = agb / q
946 | 946 |             svs.append(sv)
947 | | -            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast,
948 | | -                                                   var_bounds)
| 947 | +            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
949 | 948 |         loc = np.argmax(llfs)
950 | 949 |
951 | 950 |         return svs[int(loc)]
@@ -2341,3 +2340,163 @@ def _simulation_forecast(self, parameters, resids, backcast, var_bounds, start,
2341 | 2340 |         shocks = np.full((t, simulations, horizon), np.nan)
2342 | 2341 |
2343 | 2342 |         return VarianceForecast(forecasts, forecast_paths, shocks)
| 2343 | +
| 2344 | +
| 2345 | +class VarianceTargetingGARCH(GARCH):
| 2346 | +    r"""
| 2347 | +    GARCH and related model estimation with variance targeting
| 2348 | +
| 2349 | +    The following models can be specified using VarianceTargetingGARCH:
| 2350 | +        * ARCH(p)
| 2351 | +        * GARCH(p,q)
| 2352 | +        * GJR-GARCH(p,o,q)
| 2353 | +        * AVARCH(p)
| 2354 | +        * AVGARCH(p,q)
| 2355 | +        * TARCH(p,o,q)
| 2356 | +        * Models with arbitrary, pre-specified powers
| 2357 | +
| 2358 | +    Parameters
| 2359 | +    ----------
| 2360 | +    p : int
| 2361 | +        Order of the symmetric innovation
| 2362 | +    o : int
| 2363 | +        Order of the asymmetric innovation
| 2364 | +    q : int
| 2365 | +        Order of the lagged (transformed) conditional variance
| 2366 | +
| 2367 | +    Attributes
| 2368 | +    ----------
| 2369 | +    num_params : int
| 2370 | +        The number of parameters in the model
| 2371 | +
| 2372 | +    Examples
| 2373 | +    --------
| 2374 | +    >>> from arch.univariate import VarianceTargetingGARCH
| 2375 | +
| 2376 | +    Standard GARCH(1,1) with targeting
| 2377 | +
| 2378 | +    >>> vt = VarianceTargetingGARCH(p=1, q=1)
| 2379 | +
| 2380 | +    Asymmetric GJR-GARCH process with targeting
| 2381 | +
| 2382 | +    >>> vt = VarianceTargetingGARCH(p=1, o=1, q=1)
| 2383 | +
| 2384 | +    Notes
| 2385 | +    -----
| 2386 | +    In this class of processes, the intercept is not estimated; it is implied by the variance target :math:`\bar{\omega}`. The variance dynamics are
| 2387 | +
| 2388 | +    .. math::
| 2389 | +
| 2390 | +        \sigma_{t}^{\lambda}=
| 2391 | +        \bar{\omega}(1-\sum_{i=1}^{p}\alpha_{i}
| 2392 | +        - \frac{1}{2}\sum_{j=1}^{o}\gamma_{j}
| 2393 | +        - \sum_{k=1}^{q}\beta_{k})
| 2394 | +        + \sum_{i=1}^{p}\alpha_{i}\left|\epsilon_{t-i}\right|^{\lambda}
| 2395 | +        + \sum_{j=1}^{o}\gamma_{j}\left|\epsilon_{t-j}\right|^{\lambda}
| 2396 | +        I\left[\epsilon_{t-j}<0\right]+\sum_{k=1}^{q}\beta_{k}\sigma_{t-k}^{\lambda}
| 2397 | +    """
| 2398 | +
| 2399 | +    def __init__(self, p=1, o=0, q=1):
| 2400 | +        super(VarianceTargetingGARCH, self).__init__()
| 2401 | +        self.p = int(p)
| 2402 | +        self.o = int(o)
| 2403 | +        self.q = int(q)
| 2404 | +        self.num_params = p + o + q
| 2405 | +        if p < 0 or o < 0 or q < 0:
| 2406 | +            raise ValueError('All lag lengths must be non-negative')
| 2407 | +        if p == 0 and o == 0:
| 2408 | +            raise ValueError('One of p or o must be strictly positive')
| 2409 | +        self.name = 'Variance Targeting ' + self._name()
| 2410 | +
| 2411 | +    def bounds(self, resids):
| 2412 | +        bounds = super(VarianceTargetingGARCH, self).bounds(resids)
| 2413 | +        return bounds[1:]  # drop the bound on the intercept, which is not estimated
| 2414 | +
| 2415 | +    def constraints(self):
| 2416 | +        a, b = super(VarianceTargetingGARCH, self).constraints()
| 2417 | +        a = a[1:, 1:]  # drop the constraint on the intercept
| 2418 | +        b = b[1:]
| 2419 | +        return a, b
| 2420 | +
| 2421 | +    def compute_variance(self, parameters, resids, sigma2, backcast,
| 2422 | +                         var_bounds):
| 2423 | +
| 2424 | +        # Recover the implied intercept (omega) from the variance target
| 2425 | +        target = (resids ** 2).mean()
| 2426 | +        abar = parameters[:self.p].sum()
| 2427 | +        gbar = parameters[self.p:self.p + self.o].sum()
| 2428 | +        bbar = parameters[self.p + self.o:].sum()
| 2429 | +        omega = target * (1 - abar - 0.5 * gbar - bbar)
| 2430 | +        omega = max(omega, np.finfo(np.double).eps)
| 2431 | +        parameters = np.r_[omega, parameters]
| 2432 | +
| 2433 | +        fresids = np.abs(resids) ** 2.0
| 2434 | +        sresids = np.sign(resids)
| 2435 | +
| 2436 | +        p, o, q = self.p, self.o, self.q
| 2437 | +        nobs = resids.shape[0]
| 2438 | +
| 2439 | +        garch_recursion(parameters, fresids, sresids, sigma2, p, o, q, nobs,
| 2440 | +                        backcast, var_bounds)
| 2441 | +        return sigma2
| 2442 | +
| 2443 | +    def simulate(self, parameters, nobs, rng, burn=500, initial_value=None):
| 2444 | +        if initial_value is None:
| 2445 | +            initial_value = parameters[0]
| 2446 | +
| 2447 | +        parameters = self._targeting_to_standard_garch(parameters)
| 2448 | +        return super(VarianceTargetingGARCH, self).simulate(parameters, nobs, rng, burn=burn,
| 2449 | +                                                             initial_value=initial_value)
| 2450 | +
| 2451 | +    def _targeting_to_standard_garch(self, parameters):
| 2452 | +        p, o = self.p, self.o
| 2453 | +        abar = parameters[:p].sum()
| 2454 | +        gbar = parameters[p:p + o].sum()
| 2455 | +        bbar = parameters[p + o:].sum()
| 2456 | +        const = parameters[0] * (1 - abar - 0.5 * gbar - bbar)
| 2457 | +        return np.r_[const, parameters]
| 2458 | +
| 2459 | +    def parameter_names(self):
| 2460 | +        return _common_names(self.p, self.o, self.q)[1:]
| 2461 | +
| 2462 | +    def _analytic_forecast(self, parameters, resids, backcast, var_bounds, start, horizon):
| 2463 | +        parameters = self._targeting_to_standard_garch(parameters)
| 2464 | +        return super(VarianceTargetingGARCH, self)._analytic_forecast(parameters, resids,
| 2465 | +                                                                       backcast, var_bounds,
| 2466 | +                                                                       start, horizon)
| 2467 | +
| 2468 | +    def _simulation_forecast(self, parameters, resids, backcast, var_bounds, start, horizon,
| 2469 | +                             simulations, rng):
| 2470 | +        parameters = self._targeting_to_standard_garch(parameters)
| 2471 | +        return super(VarianceTargetingGARCH, self)._simulation_forecast(parameters, resids,
| 2472 | +                                                                         backcast, var_bounds,
| 2473 | +                                                                         start, horizon,
| 2474 | +                                                                         simulations, rng)
| 2475 | +
| 2476 | +    def starting_values(self, resids):
| 2477 | +        p, o, q = self.p, self.o, self.q
| 2478 | +        alphas = [.01, .05, .1, .2]
| 2479 | +        gammas = alphas
| 2480 | +        abg = [.5, .7, .9, .98]
| 2481 | +        abgs = list(itertools.product(*[alphas, gammas, abg]))
| 2482 | +
| 2483 | +        svs = []
| 2484 | +        var_bounds = self.variance_bounds(resids)
| 2485 | +        backcast = self.backcast(resids)
| 2486 | +        llfs = np.zeros(len(abgs))
| 2487 | +        for i, values in enumerate(abgs):
| 2488 | +            alpha, gamma, agb = values
| 2489 | +            sv = np.ones(p + o + q)
| 2490 | +            if p > 0:
| 2491 | +                sv[:p] = alpha / p
| 2492 | +                agb -= alpha
| 2493 | +            if o > 0:
| 2494 | +                sv[p: p + o] = gamma / o
| 2495 | +                agb -= gamma / 2.0
| 2496 | +            if q > 0:
| 2497 | +                sv[p + o:] = agb / q
| 2498 | +            svs.append(sv)
| 2499 | +            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
| 2500 | +        loc = np.argmax(llfs)
| 2501 | +
| 2502 | +        return svs[int(loc)]
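
For readers skimming the diff, the core of the new class is the reconstruction of the intercept from the variance target. A minimal standalone sketch of that step follows; it mirrors compute_variance above, and the parameter values and residual series are illustrative, not taken from the commit.

    import numpy as np

    # Illustrative GJR-GARCH(1,1,1) targeting parameters: [alpha, gamma, beta]; no omega
    p, o, q = 1, 1, 1
    params = np.array([0.05, 0.10, 0.85])
    resids = np.random.RandomState(0).standard_normal(1000)

    # The variance target is the average squared residual
    target = (resids ** 2).mean()
    abar = params[:p].sum()
    gbar = params[p:p + o].sum()
    bbar = params[p + o:].sum()

    # Implied intercept, floored at machine epsilon as in compute_variance
    omega = max(target * (1 - abar - 0.5 * gbar - bbar), np.finfo(np.double).eps)
    full_params = np.r_[omega, params]  # standard GARCH parameter vector

With the implied intercept prepended, the remaining computation is the ordinary GARCH recursion inherited from the parent class.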