|
@@ -1,83 +1,91 @@
|
|
|
-% Generated by roxygen2: do not edit by hand
|
|
|
-% Please edit documentation in R/metrics.R
|
|
|
-\name{metrics}
|
|
|
-\alias{metrics}
|
|
|
-\alias{me}
|
|
|
-\alias{mse}
|
|
|
-\alias{rmse}
|
|
|
-\alias{mae}
|
|
|
-\alias{mpe}
|
|
|
-\alias{mape}
|
|
|
-\alias{all_metrics}
|
|
|
-\title{Metrics for Time Series Forecasts}
|
|
|
-\usage{
|
|
|
-me(fcst)
|
|
|
-
|
|
|
-mse(fcst)
|
|
|
-
|
|
|
-rmse(fcst)
|
|
|
-
|
|
|
-mae(fcst)
|
|
|
-
|
|
|
-mpe(fcst)
|
|
|
-
|
|
|
-mape(fcst)
|
|
|
-
|
|
|
-all_metrics(fcst)
|
|
|
-}
|
|
|
-\arguments{
|
|
|
-\item{fcst}{Dataframe output of `predict`.}
|
|
|
-}
|
|
|
-\value{
|
|
|
-metrics value (numeric)
|
|
|
-}
|
|
|
-\description{
|
|
|
-A time-series forecast requires making a quantitative prediction of future values.
|
|
|
-After forecast, we also have to provide accurracy of forecasts to check wether the forecast serves our need.
|
|
|
-Metrics for time series forecasts are so useful in telling you how your model is good and helping you determine which particular forecasting models work best.
|
|
|
-}
|
|
|
-\details{
|
|
|
-Here, as a notation, we assume that \eqn{y} is the actual value and \eqn{yhat} is the forecast value.
|
|
|
-
|
|
|
-Mean Error (ME, \code{me})
|
|
|
-
|
|
|
-The Mean Error (ME) is defined by the formula:
|
|
|
-\deqn{ \frac{1}{n} \sum_{t=1}^{n} y_{t}-yhat_{t} .}
|
|
|
-
|
|
|
-Mean Squared Error (MSE, \code{mse})
|
|
|
-
|
|
|
-The Mean Squared Error (MSE) is defined by the formula:
|
|
|
-\deqn{ \frac{1}{n} \sum_{t=1}^{n} (y_{t}-yhat_{t})^2 .}
|
|
|
-
|
|
|
-Root Mean Square Error (RMSE, \code{rmse})
|
|
|
-
|
|
|
-Root Mean Square Error (RMSE) is define by the formula:
|
|
|
-\deqn{ \sqrt{\frac{1}{n} \sum_{t=1}^{n} (y_{t}-yhat_{t})^2} .}
|
|
|
-
|
|
|
-Mean Absolute Error (MAE, \code{mae})
|
|
|
-
|
|
|
-The Mean Absolute Error (MAE) is defined by the formula:
|
|
|
-\deqn{ \frac{1}{n} \sum_{t=1}^{n} | y_{t}-yhat_{t} | .}
|
|
|
-
|
|
|
-Mean Percentage Error (MPE, \code{mpe})
|
|
|
-
|
|
|
-The Mean Percentage Error (MPE) is usually expressed as a percentage
|
|
|
-and is defined by the formula:
|
|
|
-\deqn{ \frac{100}{n} \sum_{t=1}^{n} \frac {y_{t}-yhat_{t}}{y_{t}} .}
|
|
|
-
|
|
|
-Mean Absolute Percentage Error (MAPE, \code{mape})
|
|
|
-
|
|
|
-The Mean absolute Percentage Error (MAPE), also known as Mean Absolute Percentage Deviation (MAPD), is usually expressed as a percentage,
|
|
|
-and is defined by the formula:
|
|
|
-\deqn{ \frac{100}{n} \sum_{t=1}^{n} | \frac {y_{t}-yhat_{t}}{y_{t}}| .}
|
|
|
-}
|
|
|
-\examples{
|
|
|
-\dontrun{
|
|
|
-# Create example model
|
|
|
-library(readr)
|
|
|
-df <- read_csv('../tests/testthat/data.csv')
|
|
|
-m <- prophet(df)
|
|
|
-# You can check your models's accuracy using me, mse, rmse ...etc.
|
|
|
-print(rmse(m))
|
|
|
-}
|
|
|
-}
|
|
|
+% Generated by roxygen2: do not edit by hand
|
|
|
+% Please edit documentation in R/metrics.R
|
|
|
+\name{metrics}
|
|
|
+\alias{metrics}
|
|
|
+\alias{me}
|
|
|
+\alias{mse}
|
|
|
+\alias{rmse}
|
|
|
+\alias{mae}
|
|
|
+\alias{mpe}
|
|
|
+\alias{mape}
|
|
|
+\alias{all_metrics}
|
|
|
+\title{Metrics for Time Series Forecasts}
|
|
|
+\usage{
|
|
|
+me(m = NULL, df = NULL)
|
|
|
+
|
|
|
+mse(m = NULL, df = NULL)
|
|
|
+
|
|
|
+rmse(m = NULL, df = NULL)
|
|
|
+
|
|
|
+mae(m = NULL, df = NULL)
|
|
|
+
|
|
|
+mpe(m = NULL, df = NULL)
|
|
|
+
|
|
|
+mape(m = NULL, df = NULL)
|
|
|
+
|
|
|
+all_metrics(m = NULL, df = NULL)
|
|
|
+}
|
|
|
+\arguments{
|
|
|
+\item{m}{Prophet object. Default NULL}
|
|
|
+
|
|
|
+\item{df}{A dataframe which is output of `simulated_historical_forecasts` or `cross_validation` Default NULL}
|
|
|
+}
|
|
|
+\value{
|
|
|
+metrics value (numeric)
|
|
|
+}
|
|
|
+\description{
|
|
|
+A time-series forecast requires making a quantitative prediction of future values.
|
|
|
+After forecasting, we also have to provide the accuracy of forecasts to check whether the forecast serves our need.
|
|
|
+Metrics for time series forecasts are useful for telling you how good your model is and for helping you determine which particular forecasting models work best.
|
|
|
+}
|
|
|
+\details{
|
|
|
+Here, as a notation, we assume that \eqn{y} is the actual value and \eqn{yhat} is the forecast value.
|
|
|
+
|
|
|
+Mean Error (ME, \code{me})
|
|
|
+
|
|
|
+The Mean Error (ME) is defined by the formula:
|
|
|
+\deqn{ \frac{1}{n} \sum_{t=1}^{n} y_{t}-yhat_{t} .}
|
|
|
+
|
|
|
+Mean Squared Error (MSE, \code{mse})
|
|
|
+
|
|
|
+The Mean Squared Error (MSE) is defined by the formula:
|
|
|
+\deqn{ \frac{1}{n} \sum_{t=1}^{n} (y_{t}-yhat_{t})^2 .}
|
|
|
+
|
|
|
+Root Mean Square Error (RMSE, \code{rmse})
|
|
|
+
|
|
|
+Root Mean Square Error (RMSE) is defined by the formula:
|
|
|
+\deqn{ \sqrt{\frac{1}{n} \sum_{t=1}^{n} (y_{t}-yhat_{t})^2} .}
|
|
|
+
|
|
|
+Mean Absolute Error (MAE, \code{mae})
|
|
|
+
|
|
|
+The Mean Absolute Error (MAE) is defined by the formula:
|
|
|
+\deqn{ \frac{1}{n} \sum_{t=1}^{n} | y_{t}-yhat_{t} | .}
|
|
|
+
|
|
|
+Mean Percentage Error (MPE, \code{mpe})
|
|
|
+
|
|
|
+The Mean Percentage Error (MPE) is usually expressed as a percentage
|
|
|
+and is defined by the formula:
|
|
|
+\deqn{ \frac{100}{n} \sum_{t=1}^{n} \frac {y_{t}-yhat_{t}}{y_{t}} .}
|
|
|
+
|
|
|
+Mean Absolute Percentage Error (MAPE, \code{mape})
|
|
|
+
|
|
|
+The Mean Absolute Percentage Error (MAPE), also known as Mean Absolute Percentage Deviation (MAPD), is usually expressed as a percentage,
|
|
|
+and is defined by the formula:
|
|
|
+\deqn{ \frac{100}{n} \sum_{t=1}^{n} | \frac {y_{t}-yhat_{t}}{y_{t}}| .}
|
|
|
+}
|
|
|
+\examples{
|
|
|
+\dontrun{
|
|
|
+# Create example model
|
|
|
+library(readr)
|
|
|
+library(prophet)
|
|
|
+df <- read_csv('../tests/testthat/data.csv')
|
|
|
+m <- prophet(df)
|
|
|
+future <- make_future_dataframe(m, periods = 365)
|
|
|
+forecast <- predict(m, future)
|
|
|
+all_metrics(forecast)
|
|
|
+df.cv <- cross_validation(m, horizon = 100, units = 'days')
|
|
|
+all_metrics(df.cv)
|
|
|
+# You can check your model's accuracy using me, mse, rmse ...etc.
|
|
|
+print(rmse(m))
|
|
|
+}
|
|
|
+}
|