"""
=======================
Simple auto_arima model
=======================
This is a simple example of how we can fit an ARIMA model in several lines
without knowing anything about our data or optimal hyper parameters.
.. raw:: html
"""
print(__doc__)

# Author: Taylor Smith

import numpy as np
import pyramid as pm
from matplotlib import pyplot as plt

# #############################################################################
# Load the wineind dataset, holding out everything after sample 150 as the
# test set.
wineind = pm.datasets.load_wineind()
train, test = wineind[:150], wineind[150:]
n_test = test.shape[0]

# Let auto_arima search for a model on the training split. ``m=12`` declares
# a 12-period seasonal cycle and ``trace=1`` echoes candidates as they are
# evaluated; fit errors are skipped rather than raised.
arima = pm.auto_arima(train, error_action='ignore', trace=1,
                      seasonal=True, m=12)

# #############################################################################
# Overlay the forecasts (line) on the actual held-out samples (x markers).
xs = np.arange(n_test)
plt.scatter(xs, test, marker='x')
plt.plot(xs, arima.predict(n_periods=n_test))
plt.title('Actual test samples vs. forecasts')
plt.show()
"""
===========================
Fitting an auto_arima model
===========================
This example demonstrates how we can use the ``auto_arima`` function to
select an optimal time series model. We'll be fitting our model on the lynx
dataset available in the :ref:`datasets` submodule.
.. raw:: html
"""
print(__doc__)

# Author: Taylor Smith

from pyramid.datasets import load_lynx
from pyramid.arima import auto_arima
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np

# #############################################################################
# Load the data and split it into separate pieces: the first 90 samples
# train the model, the remainder is held out for evaluation.
data = load_lynx()
train, test = data[:90], data[90:]

# Fit a simple auto_arima model over a stepwise hyper-parameter search.
# NOTE(review): ``D=10, max_D=10`` forces a very high seasonal differencing
# order — presumably intentional for demonstration; confirm against the
# auto_arima defaults before reusing.
modl = auto_arima(train, start_p=1, start_q=1, start_P=1, start_Q=1,
                  max_p=5, max_q=5, max_P=5, max_Q=5, seasonal=True,
                  stepwise=True, suppress_warnings=True, D=10, max_D=10,
                  error_action='ignore')

# Create predictions for the future, evaluate on test
preds, conf_int = modl.predict(n_periods=test.shape[0], return_conf_int=True)

# Print the error:
print("Test RMSE: %.3f" % np.sqrt(mean_squared_error(test, preds)))

# #############################################################################
# Plot the training series, the forecasts, the actual test points, and the
# forecast confidence band, all on a year-based x axis.
x_axis = np.arange(train.shape[0] + preds.shape[0])
x_years = x_axis + 1821  # Year starts at 1821

plt.plot(x_years[x_axis[:train.shape[0]]], train, alpha=0.75)
plt.plot(x_years[x_axis[train.shape[0]:]], preds, alpha=0.75)  # Forecasts
plt.scatter(x_years[x_axis[train.shape[0]:]], test,
            alpha=0.4, marker='x')  # Test data
plt.fill_between(x_years[x_axis[-preds.shape[0]:]],
                 conf_int[:, 0], conf_int[:, 1],
                 alpha=0.1, color='b')
plt.title("Lynx forecasts")
plt.xlabel("Year")

# Bug fix: the original never called ``show()``, so the figure was built but
# never displayed when run as a script (the sibling examples all call it).
plt.show()
"""
=====================================
Adding new observations to your model
=====================================
This example demonstrates how to add new ground truth
observations to your model so that forecasting continues
with respect to true, observed values.
.. raw:: html
"""
print(__doc__)

# Author: Taylor Smith

from pyramid.datasets import load_lynx
from pyramid.arima import auto_arima
import matplotlib.pyplot as plt
import numpy as np

# #############################################################################
# Load the lynx data; the first 100 samples train the model and the
# remainder is held out as a test set.
lynx = load_lynx()
train, test = lynx[:100], lynx[100:]
n_train, n_test = train.shape[0], test.shape[0]

# #############################################################################
# Fit with some validation (cv) samples held out of the scoring via
# ``out_of_sample_size``.
arima = auto_arima(train, start_p=1, start_q=1, d=0, max_p=5, max_q=5,
                   out_of_sample_size=10, suppress_warnings=True,
                   stepwise=True, error_action='ignore')

# Forecast the held-out period along with its confidence intervals.
preds, conf_int = arima.predict(n_periods=n_test,
                                return_conf_int=True)

fig, axes = plt.subplots(2, 1, figsize=(12, 8))

# Top panel: training series, forecasts (o) vs. the actual test values (x),
# with the forecast confidence band shaded underneath.
x_axis = np.arange(n_train + preds.shape[0])
axes[0].plot(x_axis[:n_train], train, alpha=0.75)
axes[0].scatter(x_axis[n_train:], preds, alpha=0.4, marker='o')
axes[0].scatter(x_axis[n_train:], test, alpha=0.4, marker='x')
axes[0].fill_between(x_axis[-preds.shape[0]:], conf_int[:, 0], conf_int[:, 1],
                     alpha=0.1, color='b')
axes[0].set_title("Train samples & forecasted test samples")

# Feed the true test values back into the model, then forecast 10 more
# periods past the end of the observed data.
arima.add_new_observations(test)
new_preds, new_conf_int = arima.predict(n_periods=10, return_conf_int=True)

# Bottom panel: the full observed series followed by the refreshed forecasts.
new_x_axis = np.arange(lynx.shape[0] + 10)
axes[1].plot(new_x_axis[:lynx.shape[0]], lynx, alpha=0.75)
axes[1].scatter(new_x_axis[lynx.shape[0]:], new_preds, alpha=0.4, marker='o')
axes[1].fill_between(new_x_axis[-new_preds.shape[0]:],
                     new_conf_int[:, 0],
                     new_conf_int[:, 1],
                     alpha=0.1, color='g')
axes[1].set_title("Added new observed values with new forecasts")
plt.show()
"""
=========================
Persisting an ARIMA model
=========================
This example demonstrates how we can persist an ARIMA model to disk after
fitting it. It can then be loaded back up and used to generate forecasts.
.. raw:: html
"""
print(__doc__)

# Author: Taylor Smith

import pyramid as pm
from pyramid.datasets import load_wineind
from sklearn.externals import joblib  # for persistence
import os

# #############################################################################
# Load the data and split it into separate pieces
y = load_wineind()
train, test = y[:125], y[125:]

# Fit an ARIMA on the *training* split only.
# Bug fix: the original called ``arima.fit(y)``, which trained on the full
# series — leaving the train/test split dead code and producing "forecasts"
# for a period the model had already seen.
arima = pm.ARIMA(order=(1, 1, 2), seasonal_order=(0, 1, 1, 12))
arima.fit(train)

# #############################################################################
# Persist a model and create predictions after re-loading it
pickle_tgt = "arima.pkl"
try:
    # Pickle it
    joblib.dump(arima, pickle_tgt, compress=3)

    # Load the model up, create predictions for the test period
    arima_loaded = joblib.load(pickle_tgt)
    preds = arima_loaded.predict(n_periods=test.shape[0])
    print("Predictions: %r" % preds)

finally:
    # Remove the pickle file at the end of this example; ignore the error
    # if dumping failed and the file never existed.
    try:
        os.unlink(pickle_tgt)
    except OSError:
        pass
"""
===============
Dataset loading
===============
In this example, we demonstrate pyramid's built-in toy datasets that can be
used for benchmarking or experimentation. Pyramid has several built-in datasets
that exhibit seasonality, non-stationarity, and other time series nuances.
.. raw:: html
"""
print(__doc__)

# Author: Taylor Smith

import pyramid as pm

# #############################################################################
# Each dataset is exposed through a ``load_<name>`` function in the
# ``datasets`` submodule.
lynx = pm.datasets.load_lynx()
print("Lynx array:")
print(lynx)

# Pass ``as_series=True`` if you would rather work with a series object.
print("\nLynx series head:")
print(pm.datasets.load_lynx(as_series=True).head())

# Several other datasets:
heart_rate = pm.datasets.load_heartrate()
wineind = pm.datasets.load_wineind()
woolyrnq = pm.datasets.load_woolyrnq()
"""
===================
Array concatenation
===================
In this example, we demonstrate pyramid's convenient ``c`` function, which is,
in essence, the same as R's. It's nothing more than a convenience function in
the package, but one you should understand if you're contributing.
.. raw:: html
"""
print(__doc__)

# Author: Taylor Smith

import pyramid as pm
import numpy as np

# #############################################################################
# ``c`` accepts a variadic list of scalars...
from_args = pm.c(1, 2, 3, 4, 5)

# ...or a single existing iterable, and both spellings yield the same array:
from_iterable = pm.c([1, 2, 3, 4, 5])
assert np.array_equal(from_args, from_iterable)

# Mixed scalars and iterables are flattened into one array:
flattened = pm.c(1, 2, 3, [4, 5])
assert np.array_equal(flattened, np.arange(5) + 1)
PK MFh example_simple_fit.pyPK ֜M2@h h 4 arima/example_auto_arima.pyPK לM4q arima/example_add_new_samples.pyPK ֜MH # . arima/example_persisting_a_model.pyPK לMa a z datasets/example_load_data.pyPK לMcY Y $ utils/example_array_concatenation.pyPK !